repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
ofir123/py-printer
|
pyprinter/printer.py
|
get_printer
|
python
|
def get_printer(colors: bool = True, width_limit: bool = True, disabled: bool = False) -> Printer:
global _printer
global _colors
# Make sure we can print colors if needed.
colors = colors and _colors
# If the printer was never defined before, or the settings have changed.
if not _printer or (colors != _printer._colors) or (width_limit != _printer._width_limit):
_printer = Printer(DefaultWriter(disabled=disabled), colors=colors, width_limit=width_limit)
return _printer
|
Returns an already initialized instance of the printer.
:param colors: If False, no colors will be printed.
:param width_limit: If True, printing width will be limited by console width.
:param disabled: If True, nothing will be printed.
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/printer.py#L328-L343
| null |
import os
import re
import subprocess
import sys
from typing import List, Optional
# True if printer is in QT console context.
_IN_QT = None
class DefaultWriter:
"""
A default writing stream.
"""
def __init__(self, output_file=None, disabled: bool = False):
"""
Initializes the default writer.
:param output_file: The output file to write to (default is IPython's io.stdout).
:param disabled: If True, nothing will be printed.
"""
self.output_file = output_file or sys.stdout
self.disabled = disabled
def write(self, text: str):
if not self.disabled:
print(text, end='', file=self.output_file)
class _TextGroup:
"""
This class is a context manager that adds indentation before the text it prints.
It should only be created by specific methods of the Printer class.
"""
def __init__(self, printer, unit: int, add_line: bool):
self.printer = printer
self.unit = unit
self._add_line = add_line
def __enter__(self):
# Treat this like a new line.
if self.printer._in_line:
self.printer._is_first_line = True
self.printer._indents.append(self.unit)
self.printer.indents_sum += self.unit
def __exit__(self, exc_type, exc_val, exc_tb):
self.printer._is_first_line = False
self.printer._indents.pop()
self.printer.indents_sum -= self.unit
# Treat this like a line break.
if self._add_line and self.printer._in_line:
self.printer.write_line()
class Printer:
"""
A user-friendly printer, with auxiliary functions for colors and tabs.
"""
DEFAULT_INDENT = 4
SEPARATOR = ':'
LINE_SEP = '\n'
# ANSI Color codes constants.
_ANSI_COLOR_PREFIX = '\x1b'
_ANSI_REGEXP = re.compile('\x1b\\[(\\d;)?(\\d+)m')
_ANSI_COLOR_CODE = f'{_ANSI_COLOR_PREFIX}[%s%dm'
_DARK_CODE = '0;'
_LIGHT_CODE = '1;'
NORMAL = _ANSI_COLOR_CODE % (_DARK_CODE, 0)
DARK_RED = _ANSI_COLOR_CODE % (_DARK_CODE, 31)
DARK_GREEN = _ANSI_COLOR_CODE % (_DARK_CODE, 32)
DARK_YELLOW = _ANSI_COLOR_CODE % (_DARK_CODE, 33)
DARK_BLUE = _ANSI_COLOR_CODE % (_DARK_CODE, 34)
DARK_PURPLE = _ANSI_COLOR_CODE % (_DARK_CODE, 35)
DARK_CYAN = _ANSI_COLOR_CODE % (_DARK_CODE, 36)
GREY = _ANSI_COLOR_CODE % (_DARK_CODE, 37)
RED = _ANSI_COLOR_CODE % (_LIGHT_CODE, 31)
GREEN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 32)
YELLOW = _ANSI_COLOR_CODE % (_LIGHT_CODE, 33)
BLUE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 34)
PURPLE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 35)
CYAN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 36)
WHITE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 37)
_COLORS_LIST = ['dark_red', 'dark_green', 'dark_yellow', 'dark_blue', 'dark_purple', 'dark_cyan', 'grey', 'red',
'green', 'yellow', 'blue', 'purple', 'cyan', 'white']
_ANSI_COLOR_LENGTH = len(WHITE)
def __init__(self, writer, colors: bool = True, width_limit: bool = True):
"""
Initializes the printer with the given writer.
:param writer: The writer to use (for example - IPythonWriter, or DefaultWriter).
:param colors: If False, no colors will be printed.
:param width_limit: If True, printing width will be limited by console width.
"""
self._writer = writer
self._in_line = False
self._colors = colors
self._width_limit = width_limit
self._last_position = 0
self._is_first_line = False
self._indents = []
self.indents_sum = 0
def group(self, indent: int = DEFAULT_INDENT, add_line: bool = True) -> _TextGroup:
"""
Returns a context manager which adds an indentation before each line.
:param indent: Number of spaces to print.
:param add_line: If True, a new line will be printed after the group.
:return: A TextGroup context manager.
"""
return _TextGroup(self, indent, add_line)
def _split_lines(self, original_lines: List[str]) -> List[str]:
"""
Splits the original lines list according to the current console width and group indentations.
:param original_lines: The original lines list to split.
:return: A list of the new width-formatted lines.
"""
console_width = get_console_width()
# We take indent into account only in the inner group lines.
max_line_length = console_width - len(self.LINE_SEP) - self._last_position - \
(self.indents_sum if not self._is_first_line else self.indents_sum - self._indents[-1])
lines = []
for i, line in enumerate(original_lines):
fixed_line = []
colors_counter = 0
line_index = 0
while line_index < len(line):
c = line[line_index]
# Check if we're in a color block.
if self._colors and c == self._ANSI_COLOR_PREFIX and \
len(line) >= (line_index + self._ANSI_COLOR_LENGTH):
current_color = line[line_index:line_index + self._ANSI_COLOR_LENGTH]
# If it really is a color, skip it.
if self._ANSI_REGEXP.match(current_color):
line_index += self._ANSI_COLOR_LENGTH
fixed_line.extend(list(current_color))
colors_counter += 1
continue
fixed_line.append(line[line_index])
line_index += 1
# Create a new line, if max line is reached.
if len(fixed_line) >= max_line_length + (colors_counter * self._ANSI_COLOR_LENGTH):
# Special case in which we want to split right before the line break.
if len(line) > line_index and line[line_index] == self.LINE_SEP:
continue
line_string = ''.join(fixed_line)
if not line_string.endswith(self.LINE_SEP):
line_string += self.LINE_SEP
lines.append(line_string)
fixed_line = []
colors_counter = 0
self._last_position = 0
# Max line length has changed since the last position is now 0.
max_line_length = console_width - len(self.LINE_SEP) - self.indents_sum
self._is_first_line = False
if len(fixed_line) > 0:
fixed_line = ''.join(fixed_line)
# If this line contains only color codes, attach it to the last line instead of creating a new one.
if len(fixed_line) == self._ANSI_COLOR_LENGTH and self._ANSI_REGEXP.match(fixed_line) is not None and \
len(lines) > 0:
lines[-1] = lines[-1][:-1] + fixed_line
else:
lines.append(fixed_line)
return lines
def write(self, text: str):
"""
Prints text to the screen.
Supports colors by using the color constants.
To use colors, add the color before the text you want to print.
:param text: The text to print.
"""
# Default color is NORMAL.
last_color = (self._DARK_CODE, 0)
# We use splitlines with keepends in order to keep the line breaks.
# Then we split by using the console width.
original_lines = text.splitlines(True)
lines = self._split_lines(original_lines) if self._width_limit else original_lines
# Print the new width-formatted lines.
for line in lines:
# Print indents only at line beginnings.
if not self._in_line:
self._writer.write(' ' * self.indents_sum)
# Remove colors if needed.
if not self._colors:
for color_code in self._ANSI_REGEXP.findall(line):
line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
elif not self._ANSI_REGEXP.match(line):
# Check if the line starts with a color. If not, we apply the color from the last line.
line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line
# Print the final line.
self._writer.write(line)
# Update the in_line status.
self._in_line = not line.endswith(self.LINE_SEP)
# Update the last color used.
if self._colors:
last_color = self._ANSI_REGEXP.findall(line)[-1]
# Update last position (if there was no line break in the end).
if len(lines) > 0:
last_line = lines[-1]
if not last_line.endswith(self.LINE_SEP):
# Strip the colors to figure out the real number of characters in the line.
if self._colors:
for color_code in self._ANSI_REGEXP.findall(last_line):
last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
self._last_position += len(last_line)
else:
self._last_position = 0
self._is_first_line = False
else:
self._last_position = 0
# Reset colors for the next print.
if self._colors and not text.endswith(self.NORMAL):
self._writer.write(self.NORMAL)
def write_line(self, text: str = ''):
"""
Prints a line of text to the screen.
Uses the write method.
:param text: The text to print.
"""
self.write(text + self.LINE_SEP)
def write_aligned(self, key: str, value: str, not_important_keys: Optional[List[str]] = None,
is_list: bool = False, align_size: Optional[int] = None, key_color: str = PURPLE,
value_color: str = GREEN, dark_key_color: str = DARK_PURPLE, dark_value_color: str = DARK_GREEN,
separator: str = SEPARATOR):
"""
Prints keys and values aligned to align_size.
:param key: The name of the property to print.
:param value: The value of the property to print.
:param not_important_keys: Properties that will be printed in a darker color.
:param is_list: True if the value is a list of items.
:param align_size: The alignment size to use.
:param key_color: The key text color (default is purple).
:param value_color: The value text color (default is green).
:param dark_key_color: The key text color for unimportant keys (default is dark purple).
:param dark_value_color: The values text color for unimportant values (default is dark green).
:param separator: The separator to use (default is ':').
"""
align_size = align_size or min(32, get_console_width() // 2)
not_important_keys = not_important_keys or []
if value is None:
return
if isinstance(value, bool):
value = str(value)
if key in not_important_keys:
key_color = dark_key_color
value_color = dark_value_color
self.write(key_color + key + separator)
self.write(' ' * (align_size - len(key) - 1))
with self.group(indent=align_size):
if is_list and len(value) > 0:
self.write_line(value_color + value[0])
if len(value) > 1:
for v in value[1:]:
self.write_line(value_color + v)
elif not is_list:
self.write_line(value_color + str(value))
def write_title(self, title: str, title_color: str = YELLOW, hyphen_line_color: str = WHITE):
"""
Prints title with hyphen line underneath it.
:param title: The title to print.
:param title_color: The title text color (default is yellow).
:param hyphen_line_color: The hyphen line color (default is white).
"""
self.write_line(title_color + title)
self.write_line(hyphen_line_color + '=' * (len(title) + 3))
def __getattr__(self, item: str):
# Support color function in a generic fashion.
if item in self._COLORS_LIST:
def wrapper(text):
# Color function content will be wrapped, and the rest of the text color will be normal.
wrapped_text = getattr(self, item.upper()) + text
# No need to duplicate normal color suffix.
if not wrapped_text.endswith(self.NORMAL):
wrapped_text += self.NORMAL
return wrapped_text
return wrapper
return super().__getattribute__(item)
_printer = None
# Colors won't work on Linux if TERM is not defined.
_colors = os.name == 'nt' or os.getenv('TERM')
# If we're not inside IPython, use pyreadline's console.
if os.name == 'nt' and sys.stdout == sys.__stdout__:
try:
assert __IPYTHON__
except NameError:
try:
from pyreadline.console.console import Console
_printer = Printer(Console())
except ImportError:
# If all failed, just print without colors.
_colors = False
def _get_windows_console_width() -> int:
"""
A small utility function for getting the current console window's width, in Windows.
:return: The current console window's width.
"""
from ctypes import byref, windll
import pyreadline
out = windll.kernel32.GetStdHandle(-11)
info = pyreadline.console.CONSOLE_SCREEN_BUFFER_INFO()
windll.kernel32.GetConsoleScreenBufferInfo(out, byref(info))
return info.dwSize.X
def _get_linux_console_width() -> int:
# Don't run tput if TERM is not defined, to prevent terminal-related errors.
if os.getenv('TERM'):
return int(subprocess.check_output(['tput', 'cols']))
return 0
def _in_qtconsole() -> bool:
"""
A small utility function which determines if we're running in QTConsole's context.
"""
try:
from IPython import get_ipython
try:
from ipykernel.zmqshell import ZMQInteractiveShell
shell_object = ZMQInteractiveShell
except ImportError:
from IPython.kernel.zmq import zmqshell
shell_object = zmqshell.ZMQInteractiveShell
return isinstance(get_ipython(), shell_object)
except Exception:
return False
def get_console_width() -> int:
"""
A small utility function for getting the current console window's width.
:return: The current console window's width.
"""
# Assigning the value once, as frequent call to this function
# causes a major slow down(ImportErrors + isinstance).
global _IN_QT
if _IN_QT is None:
_IN_QT = _in_qtconsole()
try:
if _IN_QT:
# QTConsole determines and handles the max line length by itself.
width = sys.maxsize
else:
width = _get_windows_console_width() if os.name == 'nt' else _get_linux_console_width()
if width <= 0:
return 80
return width
except Exception:
# Default value.
return 80
__all__ = ['get_printer', 'get_console_width', 'Printer', 'DefaultWriter']
|
ofir123/py-printer
|
pyprinter/printer.py
|
_get_windows_console_width
|
python
|
def _get_windows_console_width() -> int:
from ctypes import byref, windll
import pyreadline
out = windll.kernel32.GetStdHandle(-11)
info = pyreadline.console.CONSOLE_SCREEN_BUFFER_INFO()
windll.kernel32.GetConsoleScreenBufferInfo(out, byref(info))
return info.dwSize.X
|
A small utility function for getting the current console window's width, in Windows.
:return: The current console window's width.
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/printer.py#L346-L358
| null |
import os
import re
import subprocess
import sys
from typing import List, Optional
# True if printer is in QT console context.
_IN_QT = None
class DefaultWriter:
"""
A default writing stream.
"""
def __init__(self, output_file=None, disabled: bool = False):
"""
Initializes the default writer.
:param output_file: The output file to write to (default is IPython's io.stdout).
:param disabled: If True, nothing will be printed.
"""
self.output_file = output_file or sys.stdout
self.disabled = disabled
def write(self, text: str):
if not self.disabled:
print(text, end='', file=self.output_file)
class _TextGroup:
"""
This class is a context manager that adds indentation before the text it prints.
It should only be created by specific methods of the Printer class.
"""
def __init__(self, printer, unit: int, add_line: bool):
self.printer = printer
self.unit = unit
self._add_line = add_line
def __enter__(self):
# Treat this like a new line.
if self.printer._in_line:
self.printer._is_first_line = True
self.printer._indents.append(self.unit)
self.printer.indents_sum += self.unit
def __exit__(self, exc_type, exc_val, exc_tb):
self.printer._is_first_line = False
self.printer._indents.pop()
self.printer.indents_sum -= self.unit
# Treat this like a line break.
if self._add_line and self.printer._in_line:
self.printer.write_line()
class Printer:
"""
A user-friendly printer, with auxiliary functions for colors and tabs.
"""
DEFAULT_INDENT = 4
SEPARATOR = ':'
LINE_SEP = '\n'
# ANSI Color codes constants.
_ANSI_COLOR_PREFIX = '\x1b'
_ANSI_REGEXP = re.compile('\x1b\\[(\\d;)?(\\d+)m')
_ANSI_COLOR_CODE = f'{_ANSI_COLOR_PREFIX}[%s%dm'
_DARK_CODE = '0;'
_LIGHT_CODE = '1;'
NORMAL = _ANSI_COLOR_CODE % (_DARK_CODE, 0)
DARK_RED = _ANSI_COLOR_CODE % (_DARK_CODE, 31)
DARK_GREEN = _ANSI_COLOR_CODE % (_DARK_CODE, 32)
DARK_YELLOW = _ANSI_COLOR_CODE % (_DARK_CODE, 33)
DARK_BLUE = _ANSI_COLOR_CODE % (_DARK_CODE, 34)
DARK_PURPLE = _ANSI_COLOR_CODE % (_DARK_CODE, 35)
DARK_CYAN = _ANSI_COLOR_CODE % (_DARK_CODE, 36)
GREY = _ANSI_COLOR_CODE % (_DARK_CODE, 37)
RED = _ANSI_COLOR_CODE % (_LIGHT_CODE, 31)
GREEN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 32)
YELLOW = _ANSI_COLOR_CODE % (_LIGHT_CODE, 33)
BLUE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 34)
PURPLE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 35)
CYAN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 36)
WHITE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 37)
_COLORS_LIST = ['dark_red', 'dark_green', 'dark_yellow', 'dark_blue', 'dark_purple', 'dark_cyan', 'grey', 'red',
'green', 'yellow', 'blue', 'purple', 'cyan', 'white']
_ANSI_COLOR_LENGTH = len(WHITE)
def __init__(self, writer, colors: bool = True, width_limit: bool = True):
"""
Initializes the printer with the given writer.
:param writer: The writer to use (for example - IPythonWriter, or DefaultWriter).
:param colors: If False, no colors will be printed.
:param width_limit: If True, printing width will be limited by console width.
"""
self._writer = writer
self._in_line = False
self._colors = colors
self._width_limit = width_limit
self._last_position = 0
self._is_first_line = False
self._indents = []
self.indents_sum = 0
def group(self, indent: int = DEFAULT_INDENT, add_line: bool = True) -> _TextGroup:
"""
Returns a context manager which adds an indentation before each line.
:param indent: Number of spaces to print.
:param add_line: If True, a new line will be printed after the group.
:return: A TextGroup context manager.
"""
return _TextGroup(self, indent, add_line)
def _split_lines(self, original_lines: List[str]) -> List[str]:
"""
Splits the original lines list according to the current console width and group indentations.
:param original_lines: The original lines list to split.
:return: A list of the new width-formatted lines.
"""
console_width = get_console_width()
# We take indent into account only in the inner group lines.
max_line_length = console_width - len(self.LINE_SEP) - self._last_position - \
(self.indents_sum if not self._is_first_line else self.indents_sum - self._indents[-1])
lines = []
for i, line in enumerate(original_lines):
fixed_line = []
colors_counter = 0
line_index = 0
while line_index < len(line):
c = line[line_index]
# Check if we're in a color block.
if self._colors and c == self._ANSI_COLOR_PREFIX and \
len(line) >= (line_index + self._ANSI_COLOR_LENGTH):
current_color = line[line_index:line_index + self._ANSI_COLOR_LENGTH]
# If it really is a color, skip it.
if self._ANSI_REGEXP.match(current_color):
line_index += self._ANSI_COLOR_LENGTH
fixed_line.extend(list(current_color))
colors_counter += 1
continue
fixed_line.append(line[line_index])
line_index += 1
# Create a new line, if max line is reached.
if len(fixed_line) >= max_line_length + (colors_counter * self._ANSI_COLOR_LENGTH):
# Special case in which we want to split right before the line break.
if len(line) > line_index and line[line_index] == self.LINE_SEP:
continue
line_string = ''.join(fixed_line)
if not line_string.endswith(self.LINE_SEP):
line_string += self.LINE_SEP
lines.append(line_string)
fixed_line = []
colors_counter = 0
self._last_position = 0
# Max line length has changed since the last position is now 0.
max_line_length = console_width - len(self.LINE_SEP) - self.indents_sum
self._is_first_line = False
if len(fixed_line) > 0:
fixed_line = ''.join(fixed_line)
# If this line contains only color codes, attach it to the last line instead of creating a new one.
if len(fixed_line) == self._ANSI_COLOR_LENGTH and self._ANSI_REGEXP.match(fixed_line) is not None and \
len(lines) > 0:
lines[-1] = lines[-1][:-1] + fixed_line
else:
lines.append(fixed_line)
return lines
def write(self, text: str):
"""
Prints text to the screen.
Supports colors by using the color constants.
To use colors, add the color before the text you want to print.
:param text: The text to print.
"""
# Default color is NORMAL.
last_color = (self._DARK_CODE, 0)
# We use splitlines with keepends in order to keep the line breaks.
# Then we split by using the console width.
original_lines = text.splitlines(True)
lines = self._split_lines(original_lines) if self._width_limit else original_lines
# Print the new width-formatted lines.
for line in lines:
# Print indents only at line beginnings.
if not self._in_line:
self._writer.write(' ' * self.indents_sum)
# Remove colors if needed.
if not self._colors:
for color_code in self._ANSI_REGEXP.findall(line):
line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
elif not self._ANSI_REGEXP.match(line):
# Check if the line starts with a color. If not, we apply the color from the last line.
line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line
# Print the final line.
self._writer.write(line)
# Update the in_line status.
self._in_line = not line.endswith(self.LINE_SEP)
# Update the last color used.
if self._colors:
last_color = self._ANSI_REGEXP.findall(line)[-1]
# Update last position (if there was no line break in the end).
if len(lines) > 0:
last_line = lines[-1]
if not last_line.endswith(self.LINE_SEP):
# Strip the colors to figure out the real number of characters in the line.
if self._colors:
for color_code in self._ANSI_REGEXP.findall(last_line):
last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
self._last_position += len(last_line)
else:
self._last_position = 0
self._is_first_line = False
else:
self._last_position = 0
# Reset colors for the next print.
if self._colors and not text.endswith(self.NORMAL):
self._writer.write(self.NORMAL)
def write_line(self, text: str = ''):
"""
Prints a line of text to the screen.
Uses the write method.
:param text: The text to print.
"""
self.write(text + self.LINE_SEP)
def write_aligned(self, key: str, value: str, not_important_keys: Optional[List[str]] = None,
is_list: bool = False, align_size: Optional[int] = None, key_color: str = PURPLE,
value_color: str = GREEN, dark_key_color: str = DARK_PURPLE, dark_value_color: str = DARK_GREEN,
separator: str = SEPARATOR):
"""
Prints keys and values aligned to align_size.
:param key: The name of the property to print.
:param value: The value of the property to print.
:param not_important_keys: Properties that will be printed in a darker color.
:param is_list: True if the value is a list of items.
:param align_size: The alignment size to use.
:param key_color: The key text color (default is purple).
:param value_color: The value text color (default is green).
:param dark_key_color: The key text color for unimportant keys (default is dark purple).
:param dark_value_color: The values text color for unimportant values (default is dark green).
:param separator: The separator to use (default is ':').
"""
align_size = align_size or min(32, get_console_width() // 2)
not_important_keys = not_important_keys or []
if value is None:
return
if isinstance(value, bool):
value = str(value)
if key in not_important_keys:
key_color = dark_key_color
value_color = dark_value_color
self.write(key_color + key + separator)
self.write(' ' * (align_size - len(key) - 1))
with self.group(indent=align_size):
if is_list and len(value) > 0:
self.write_line(value_color + value[0])
if len(value) > 1:
for v in value[1:]:
self.write_line(value_color + v)
elif not is_list:
self.write_line(value_color + str(value))
def write_title(self, title: str, title_color: str = YELLOW, hyphen_line_color: str = WHITE):
"""
Prints title with hyphen line underneath it.
:param title: The title to print.
:param title_color: The title text color (default is yellow).
:param hyphen_line_color: The hyphen line color (default is white).
"""
self.write_line(title_color + title)
self.write_line(hyphen_line_color + '=' * (len(title) + 3))
def __getattr__(self, item: str):
# Support color function in a generic fashion.
if item in self._COLORS_LIST:
def wrapper(text):
# Color function content will be wrapped, and the rest of the text color will be normal.
wrapped_text = getattr(self, item.upper()) + text
# No need to duplicate normal color suffix.
if not wrapped_text.endswith(self.NORMAL):
wrapped_text += self.NORMAL
return wrapped_text
return wrapper
return super().__getattribute__(item)
_printer = None
# Colors won't work on Linux if TERM is not defined.
_colors = os.name == 'nt' or os.getenv('TERM')
# If we're not inside IPython, use pyreadline's console.
if os.name == 'nt' and sys.stdout == sys.__stdout__:
try:
assert __IPYTHON__
except NameError:
try:
from pyreadline.console.console import Console
_printer = Printer(Console())
except ImportError:
# If all failed, just print without colors.
_colors = False
def get_printer(colors: bool = True, width_limit: bool = True, disabled: bool = False) -> Printer:
"""
Returns an already initialized instance of the printer.
:param colors: If False, no colors will be printed.
:param width_limit: If True, printing width will be limited by console width.
:param disabled: If True, nothing will be printed.
"""
global _printer
global _colors
# Make sure we can print colors if needed.
colors = colors and _colors
# If the printer was never defined before, or the settings have changed.
if not _printer or (colors != _printer._colors) or (width_limit != _printer._width_limit):
_printer = Printer(DefaultWriter(disabled=disabled), colors=colors, width_limit=width_limit)
return _printer
def _get_linux_console_width() -> int:
# Don't run tput if TERM is not defined, to prevent terminal-related errors.
if os.getenv('TERM'):
return int(subprocess.check_output(['tput', 'cols']))
return 0
def _in_qtconsole() -> bool:
"""
A small utility function which determines if we're running in QTConsole's context.
"""
try:
from IPython import get_ipython
try:
from ipykernel.zmqshell import ZMQInteractiveShell
shell_object = ZMQInteractiveShell
except ImportError:
from IPython.kernel.zmq import zmqshell
shell_object = zmqshell.ZMQInteractiveShell
return isinstance(get_ipython(), shell_object)
except Exception:
return False
def get_console_width() -> int:
"""
A small utility function for getting the current console window's width.
:return: The current console window's width.
"""
# Assigning the value once, as frequent call to this function
# causes a major slow down(ImportErrors + isinstance).
global _IN_QT
if _IN_QT is None:
_IN_QT = _in_qtconsole()
try:
if _IN_QT:
# QTConsole determines and handles the max line length by itself.
width = sys.maxsize
else:
width = _get_windows_console_width() if os.name == 'nt' else _get_linux_console_width()
if width <= 0:
return 80
return width
except Exception:
# Default value.
return 80
__all__ = ['get_printer', 'get_console_width', 'Printer', 'DefaultWriter']
|
ofir123/py-printer
|
pyprinter/printer.py
|
_in_qtconsole
|
python
|
def _in_qtconsole() -> bool:
try:
from IPython import get_ipython
try:
from ipykernel.zmqshell import ZMQInteractiveShell
shell_object = ZMQInteractiveShell
except ImportError:
from IPython.kernel.zmq import zmqshell
shell_object = zmqshell.ZMQInteractiveShell
return isinstance(get_ipython(), shell_object)
except Exception:
return False
|
A small utility function which determines if we're running in QTConsole's context.
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/printer.py#L368-L382
| null |
import os
import re
import subprocess
import sys
from typing import List, Optional
# True if printer is in QT console context.
_IN_QT = None
class DefaultWriter:
"""
A default writing stream.
"""
def __init__(self, output_file=None, disabled: bool = False):
"""
Initializes the default writer.
:param output_file: The output file to write to (default is IPython's io.stdout).
:param disabled: If True, nothing will be printed.
"""
self.output_file = output_file or sys.stdout
self.disabled = disabled
def write(self, text: str):
if not self.disabled:
print(text, end='', file=self.output_file)
class _TextGroup:
"""
This class is a context manager that adds indentation before the text it prints.
It should only be created by specific methods of the Printer class.
"""
def __init__(self, printer, unit: int, add_line: bool):
self.printer = printer
self.unit = unit
self._add_line = add_line
def __enter__(self):
# Treat this like a new line.
if self.printer._in_line:
self.printer._is_first_line = True
self.printer._indents.append(self.unit)
self.printer.indents_sum += self.unit
def __exit__(self, exc_type, exc_val, exc_tb):
self.printer._is_first_line = False
self.printer._indents.pop()
self.printer.indents_sum -= self.unit
# Treat this like a line break.
if self._add_line and self.printer._in_line:
self.printer.write_line()
class Printer:
"""
A user-friendly printer, with auxiliary functions for colors and tabs.
"""
DEFAULT_INDENT = 4
SEPARATOR = ':'
LINE_SEP = '\n'
# ANSI Color codes constants.
_ANSI_COLOR_PREFIX = '\x1b'
_ANSI_REGEXP = re.compile('\x1b\\[(\\d;)?(\\d+)m')
_ANSI_COLOR_CODE = f'{_ANSI_COLOR_PREFIX}[%s%dm'
_DARK_CODE = '0;'
_LIGHT_CODE = '1;'
NORMAL = _ANSI_COLOR_CODE % (_DARK_CODE, 0)
DARK_RED = _ANSI_COLOR_CODE % (_DARK_CODE, 31)
DARK_GREEN = _ANSI_COLOR_CODE % (_DARK_CODE, 32)
DARK_YELLOW = _ANSI_COLOR_CODE % (_DARK_CODE, 33)
DARK_BLUE = _ANSI_COLOR_CODE % (_DARK_CODE, 34)
DARK_PURPLE = _ANSI_COLOR_CODE % (_DARK_CODE, 35)
DARK_CYAN = _ANSI_COLOR_CODE % (_DARK_CODE, 36)
GREY = _ANSI_COLOR_CODE % (_DARK_CODE, 37)
RED = _ANSI_COLOR_CODE % (_LIGHT_CODE, 31)
GREEN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 32)
YELLOW = _ANSI_COLOR_CODE % (_LIGHT_CODE, 33)
BLUE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 34)
PURPLE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 35)
CYAN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 36)
WHITE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 37)
_COLORS_LIST = ['dark_red', 'dark_green', 'dark_yellow', 'dark_blue', 'dark_purple', 'dark_cyan', 'grey', 'red',
'green', 'yellow', 'blue', 'purple', 'cyan', 'white']
_ANSI_COLOR_LENGTH = len(WHITE)
def __init__(self, writer, colors: bool = True, width_limit: bool = True):
"""
Initializes the printer with the given writer.
:param writer: The writer to use (for example - IPythonWriter, or DefaultWriter).
:param colors: If False, no colors will be printed.
:param width_limit: If True, printing width will be limited by console width.
"""
self._writer = writer
self._in_line = False
self._colors = colors
self._width_limit = width_limit
self._last_position = 0
self._is_first_line = False
self._indents = []
self.indents_sum = 0
def group(self, indent: int = DEFAULT_INDENT, add_line: bool = True) -> _TextGroup:
"""
Returns a context manager which adds an indentation before each line.
:param indent: Number of spaces to print.
:param add_line: If True, a new line will be printed after the group.
:return: A TextGroup context manager.
"""
return _TextGroup(self, indent, add_line)
def _split_lines(self, original_lines: List[str]) -> List[str]:
"""
Splits the original lines list according to the current console width and group indentations.
:param original_lines: The original lines list to split.
:return: A list of the new width-formatted lines.
"""
console_width = get_console_width()
# We take indent into account only in the inner group lines.
max_line_length = console_width - len(self.LINE_SEP) - self._last_position - \
(self.indents_sum if not self._is_first_line else self.indents_sum - self._indents[-1])
lines = []
for i, line in enumerate(original_lines):
fixed_line = []
colors_counter = 0
line_index = 0
while line_index < len(line):
c = line[line_index]
# Check if we're in a color block.
if self._colors and c == self._ANSI_COLOR_PREFIX and \
len(line) >= (line_index + self._ANSI_COLOR_LENGTH):
current_color = line[line_index:line_index + self._ANSI_COLOR_LENGTH]
# If it really is a color, skip it.
if self._ANSI_REGEXP.match(current_color):
line_index += self._ANSI_COLOR_LENGTH
fixed_line.extend(list(current_color))
colors_counter += 1
continue
fixed_line.append(line[line_index])
line_index += 1
# Create a new line, if max line is reached.
if len(fixed_line) >= max_line_length + (colors_counter * self._ANSI_COLOR_LENGTH):
# Special case in which we want to split right before the line break.
if len(line) > line_index and line[line_index] == self.LINE_SEP:
continue
line_string = ''.join(fixed_line)
if not line_string.endswith(self.LINE_SEP):
line_string += self.LINE_SEP
lines.append(line_string)
fixed_line = []
colors_counter = 0
self._last_position = 0
# Max line length has changed since the last position is now 0.
max_line_length = console_width - len(self.LINE_SEP) - self.indents_sum
self._is_first_line = False
if len(fixed_line) > 0:
fixed_line = ''.join(fixed_line)
# If this line contains only color codes, attach it to the last line instead of creating a new one.
if len(fixed_line) == self._ANSI_COLOR_LENGTH and self._ANSI_REGEXP.match(fixed_line) is not None and \
len(lines) > 0:
lines[-1] = lines[-1][:-1] + fixed_line
else:
lines.append(fixed_line)
return lines
def write(self, text: str):
    """
    Prints text to the screen, honoring colors, indentation groups and width limiting.

    Supports colors by using the color constants: prefix the text with a color
    constant (e.g. ``Printer.RED``) to switch colors mid-text. When ``_colors``
    is off, color codes are stripped instead of printed. When ``_width_limit``
    is on, lines are re-split to fit the console width first.

    :param text: The text to print (may contain embedded ANSI color constants
                 and line breaks).
    """
    # Default color is NORMAL (dark code, value 0) until a color code is seen.
    last_color = (self._DARK_CODE, 0)
    # We use splitlines with keepends in order to keep the line breaks.
    # Then we split by using the console width.
    original_lines = text.splitlines(True)
    lines = self._split_lines(original_lines) if self._width_limit else original_lines
    # Print the new width-formatted lines.
    for line in lines:
        # Print indents only at line beginnings (i.e. when the previous write
        # ended with a line separator).
        if not self._in_line:
            self._writer.write(' ' * self.indents_sum)
        # Remove colors if needed.
        if not self._colors:
            for color_code in self._ANSI_REGEXP.findall(line):
                line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
        elif not self._ANSI_REGEXP.match(line):
            # Check if the line starts with a color. If not, we apply the color from the last line.
            line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line
        # Print the final line.
        self._writer.write(line)
        # Update the in_line status.
        self._in_line = not line.endswith(self.LINE_SEP)
        # Update the last color used (a color is guaranteed to exist here,
        # since one was prepended above when the line had none).
        if self._colors:
            last_color = self._ANSI_REGEXP.findall(line)[-1]
    # Update last position (if there was no line break in the end).
    if len(lines) > 0:
        last_line = lines[-1]
        if not last_line.endswith(self.LINE_SEP):
            # Strip the colors to figure out the real number of characters in the line.
            if self._colors:
                for color_code in self._ANSI_REGEXP.findall(last_line):
                    last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
            self._last_position += len(last_line)
        else:
            self._last_position = 0
            self._is_first_line = False
    else:
        self._last_position = 0
    # Reset colors for the next print.
    if self._colors and not text.endswith(self.NORMAL):
        self._writer.write(self.NORMAL)
def write_line(self, text: str = ''):
"""
Prints a line of text to the screen.
Uses the write method.
:param text: The text to print.
"""
self.write(text + self.LINE_SEP)
def write_aligned(self, key: str, value: str, not_important_keys: Optional[List[str]] = None,
                  is_list: bool = False, align_size: Optional[int] = None, key_color: str = PURPLE,
                  value_color: str = GREEN, dark_key_color: str = DARK_PURPLE, dark_value_color: str = DARK_GREEN,
                  separator: str = SEPARATOR):
    """
    Prints a key and its value, with the value column aligned to ``align_size``.

    :param key: The name of the property to print.
    :param value: The value of the property to print (``None`` prints nothing).
    :param not_important_keys: Properties that will be printed in a darker color.
    :param is_list: True if the value is a list of items (each printed on its own line).
    :param align_size: The alignment size to use (default: half the console width, capped at 32).
    :param key_color: The key text color (default is purple).
    :param value_color: The value text color (default is green).
    :param dark_key_color: The key text color for unimportant keys (default is dark purple).
    :param dark_value_color: The values text color for unimportant values (default is dark green).
    :param separator: The separator to use (default is ':').
    """
    align_size = align_size or min(32, get_console_width() // 2)
    unimportant = not_important_keys or []
    if value is None:
        return
    if isinstance(value, bool):
        value = str(value)
    # Darker colors for keys marked as unimportant.
    if key in unimportant:
        key_color, value_color = dark_key_color, dark_value_color
    self.write(key_color + key + separator)
    # Pad the key column so the value starts at align_size.
    self.write(' ' * (align_size - len(key) - 1))
    with self.group(indent=align_size):
        if is_list:
            # One line per list item; an empty list prints nothing.
            for item in value:
                self.write_line(value_color + item)
        else:
            self.write_line(value_color + str(value))
def write_title(self, title: str, title_color: str = YELLOW, hyphen_line_color: str = WHITE):
"""
Prints title with hyphen line underneath it.
:param title: The title to print.
:param title_color: The title text color (default is yellow).
:param hyphen_line_color: The hyphen line color (default is white).
"""
self.write_line(title_color + title)
self.write_line(hyphen_line_color + '=' * (len(title) + 3))
def __getattr__(self, item: str):
# Support color function in a generic fashion.
if item in self._COLORS_LIST:
def wrapper(text):
# Color function content will be wrapped, and the rest of the text color will be normal.
wrapped_text = getattr(self, item.upper()) + text
# No need to duplicate normal color suffix.
if not wrapped_text.endswith(self.NORMAL):
wrapped_text += self.NORMAL
return wrapped_text
return wrapper
return super().__getattribute__(item)
# Cached module-level printer instance, created lazily by get_printer().
_printer = None
# Colors won't work on Linux if TERM is not defined.
_colors = os.name == 'nt' or os.getenv('TERM')
# If we're not inside IPython, use pyreadline's console.
if os.name == 'nt' and sys.stdout == sys.__stdout__:
    try:
        # __IPYTHON__ is injected into builtins by IPython; a NameError
        # here means we are running under plain Python.
        assert __IPYTHON__
    except NameError:
        try:
            from pyreadline.console.console import Console
            _printer = Printer(Console())
        except ImportError:
            # If all failed, just print without colors.
            _colors = False
def get_printer(colors: bool = True, width_limit: bool = True, disabled: bool = False) -> Printer:
    """
    Returns a cached, initialized instance of the printer.

    The cached instance is rebuilt whenever any of the requested settings
    differ from the cached printer's settings.

    :param colors: If False, no colors will be printed.
    :param width_limit: If True, printing width will be limited by console width.
    :param disabled: If True, nothing will be printed.
    :return: The shared Printer instance.
    """
    global _printer
    # Make sure we can print colors if needed (the environment may not support them).
    colors = colors and _colors
    # BUGFIX: the cached printer previously ignored a change in `disabled`,
    # so get_printer(disabled=True) after an earlier call returned an enabled
    # printer. Writers without a `disabled` attribute (e.g. pyreadline's
    # Console) are treated as enabled.
    currently_disabled = getattr(_printer._writer, 'disabled', False) if _printer else None
    # If the printer was never defined before, or any setting has changed.
    if (not _printer or colors != _printer._colors or width_limit != _printer._width_limit or
            disabled != currently_disabled):
        _printer = Printer(DefaultWriter(disabled=disabled), colors=colors, width_limit=width_limit)
    return _printer
def _get_windows_console_width() -> int:
    """
    A small utility function for getting the current console window's width, in Windows.

    :return: The current console window's width (in characters).
    """
    from ctypes import byref, windll
    import pyreadline

    # Query the screen-buffer info of the standard output handle
    # (-11 is STD_OUTPUT_HANDLE).
    handle = windll.kernel32.GetStdHandle(-11)
    buffer_info = pyreadline.console.CONSOLE_SCREEN_BUFFER_INFO()
    windll.kernel32.GetConsoleScreenBufferInfo(handle, byref(buffer_info))
    return buffer_info.dwSize.X
def _get_linux_console_width() -> int:
# Don't run tput if TERM is not defined, to prevent terminal-related errors.
if os.getenv('TERM'):
return int(subprocess.check_output(['tput', 'cols']))
return 0
def get_console_width() -> int:
    """
    A small utility function for getting the current console window's width.

    Falls back to 80 columns when the width cannot be determined.

    :return: The current console window's width.
    """
    # Cache the QT-console check: frequent calls to this function caused a
    # major slowdown (ImportErrors + isinstance).
    global _IN_QT
    if _IN_QT is None:
        _IN_QT = _in_qtconsole()
    try:
        if _IN_QT:
            # QTConsole determines and handles the max line length by itself.
            return sys.maxsize
        if os.name == 'nt':
            width = _get_windows_console_width()
        else:
            width = _get_linux_console_width()
        return width if width > 0 else 80
    except Exception:
        # Default value.
        return 80
__all__ = ['get_printer', 'get_console_width', 'Printer', 'DefaultWriter']
|
ofir123/py-printer
|
pyprinter/printer.py
|
get_console_width
|
python
|
def get_console_width() -> int:
# Assigning the value once, as frequent call to this function
# causes a major slow down(ImportErrors + isinstance).
global _IN_QT
if _IN_QT is None:
_IN_QT = _in_qtconsole()
try:
if _IN_QT:
# QTConsole determines and handles the max line length by itself.
width = sys.maxsize
else:
width = _get_windows_console_width() if os.name == 'nt' else _get_linux_console_width()
if width <= 0:
return 80
return width
except Exception:
# Default value.
return 80
|
A small utility function for getting the current console window's width.
:return: The current console window's width.
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/printer.py#L385-L408
| null |
import os
import re
import subprocess
import sys
from typing import List, Optional
# True if printer is in QT console context.
_IN_QT = None
class DefaultWriter:
"""
A default writing stream.
"""
def __init__(self, output_file=None, disabled: bool = False):
"""
Initializes the default writer.
:param output_file: The output file to write to (default is IPython's io.stdout).
:param disabled: If True, nothing will be printed.
"""
self.output_file = output_file or sys.stdout
self.disabled = disabled
def write(self, text: str):
if not self.disabled:
print(text, end='', file=self.output_file)
class _TextGroup:
"""
This class is a context manager that adds indentation before the text it prints.
It should only be created by specific methods of the Printer class.
"""
def __init__(self, printer, unit: int, add_line: bool):
self.printer = printer
self.unit = unit
self._add_line = add_line
def __enter__(self):
# Treat this like a new line.
if self.printer._in_line:
self.printer._is_first_line = True
self.printer._indents.append(self.unit)
self.printer.indents_sum += self.unit
def __exit__(self, exc_type, exc_val, exc_tb):
self.printer._is_first_line = False
self.printer._indents.pop()
self.printer.indents_sum -= self.unit
# Treat this like a line break.
if self._add_line and self.printer._in_line:
self.printer.write_line()
class Printer:
"""
A user-friendly printer, with auxiliary functions for colors and tabs.
"""
DEFAULT_INDENT = 4
SEPARATOR = ':'
LINE_SEP = '\n'
# ANSI Color codes constants.
_ANSI_COLOR_PREFIX = '\x1b'
_ANSI_REGEXP = re.compile('\x1b\\[(\\d;)?(\\d+)m')
_ANSI_COLOR_CODE = f'{_ANSI_COLOR_PREFIX}[%s%dm'
_DARK_CODE = '0;'
_LIGHT_CODE = '1;'
NORMAL = _ANSI_COLOR_CODE % (_DARK_CODE, 0)
DARK_RED = _ANSI_COLOR_CODE % (_DARK_CODE, 31)
DARK_GREEN = _ANSI_COLOR_CODE % (_DARK_CODE, 32)
DARK_YELLOW = _ANSI_COLOR_CODE % (_DARK_CODE, 33)
DARK_BLUE = _ANSI_COLOR_CODE % (_DARK_CODE, 34)
DARK_PURPLE = _ANSI_COLOR_CODE % (_DARK_CODE, 35)
DARK_CYAN = _ANSI_COLOR_CODE % (_DARK_CODE, 36)
GREY = _ANSI_COLOR_CODE % (_DARK_CODE, 37)
RED = _ANSI_COLOR_CODE % (_LIGHT_CODE, 31)
GREEN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 32)
YELLOW = _ANSI_COLOR_CODE % (_LIGHT_CODE, 33)
BLUE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 34)
PURPLE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 35)
CYAN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 36)
WHITE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 37)
_COLORS_LIST = ['dark_red', 'dark_green', 'dark_yellow', 'dark_blue', 'dark_purple', 'dark_cyan', 'grey', 'red',
'green', 'yellow', 'blue', 'purple', 'cyan', 'white']
_ANSI_COLOR_LENGTH = len(WHITE)
def __init__(self, writer, colors: bool = True, width_limit: bool = True):
"""
Initializes the printer with the given writer.
:param writer: The writer to use (for example - IPythonWriter, or DefaultWriter).
:param colors: If False, no colors will be printed.
:param width_limit: If True, printing width will be limited by console width.
"""
self._writer = writer
self._in_line = False
self._colors = colors
self._width_limit = width_limit
self._last_position = 0
self._is_first_line = False
self._indents = []
self.indents_sum = 0
def group(self, indent: int = DEFAULT_INDENT, add_line: bool = True) -> _TextGroup:
"""
Returns a context manager which adds an indentation before each line.
:param indent: Number of spaces to print.
:param add_line: If True, a new line will be printed after the group.
:return: A TextGroup context manager.
"""
return _TextGroup(self, indent, add_line)
def _split_lines(self, original_lines: List[str]) -> List[str]:
"""
Splits the original lines list according to the current console width and group indentations.
:param original_lines: The original lines list to split.
:return: A list of the new width-formatted lines.
"""
console_width = get_console_width()
# We take indent into account only in the inner group lines.
max_line_length = console_width - len(self.LINE_SEP) - self._last_position - \
(self.indents_sum if not self._is_first_line else self.indents_sum - self._indents[-1])
lines = []
for i, line in enumerate(original_lines):
fixed_line = []
colors_counter = 0
line_index = 0
while line_index < len(line):
c = line[line_index]
# Check if we're in a color block.
if self._colors and c == self._ANSI_COLOR_PREFIX and \
len(line) >= (line_index + self._ANSI_COLOR_LENGTH):
current_color = line[line_index:line_index + self._ANSI_COLOR_LENGTH]
# If it really is a color, skip it.
if self._ANSI_REGEXP.match(current_color):
line_index += self._ANSI_COLOR_LENGTH
fixed_line.extend(list(current_color))
colors_counter += 1
continue
fixed_line.append(line[line_index])
line_index += 1
# Create a new line, if max line is reached.
if len(fixed_line) >= max_line_length + (colors_counter * self._ANSI_COLOR_LENGTH):
# Special case in which we want to split right before the line break.
if len(line) > line_index and line[line_index] == self.LINE_SEP:
continue
line_string = ''.join(fixed_line)
if not line_string.endswith(self.LINE_SEP):
line_string += self.LINE_SEP
lines.append(line_string)
fixed_line = []
colors_counter = 0
self._last_position = 0
# Max line length has changed since the last position is now 0.
max_line_length = console_width - len(self.LINE_SEP) - self.indents_sum
self._is_first_line = False
if len(fixed_line) > 0:
fixed_line = ''.join(fixed_line)
# If this line contains only color codes, attach it to the last line instead of creating a new one.
if len(fixed_line) == self._ANSI_COLOR_LENGTH and self._ANSI_REGEXP.match(fixed_line) is not None and \
len(lines) > 0:
lines[-1] = lines[-1][:-1] + fixed_line
else:
lines.append(fixed_line)
return lines
def write(self, text: str):
"""
Prints text to the screen.
Supports colors by using the color constants.
To use colors, add the color before the text you want to print.
:param text: The text to print.
"""
# Default color is NORMAL.
last_color = (self._DARK_CODE, 0)
# We use splitlines with keepends in order to keep the line breaks.
# Then we split by using the console width.
original_lines = text.splitlines(True)
lines = self._split_lines(original_lines) if self._width_limit else original_lines
# Print the new width-formatted lines.
for line in lines:
# Print indents only at line beginnings.
if not self._in_line:
self._writer.write(' ' * self.indents_sum)
# Remove colors if needed.
if not self._colors:
for color_code in self._ANSI_REGEXP.findall(line):
line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
elif not self._ANSI_REGEXP.match(line):
# Check if the line starts with a color. If not, we apply the color from the last line.
line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line
# Print the final line.
self._writer.write(line)
# Update the in_line status.
self._in_line = not line.endswith(self.LINE_SEP)
# Update the last color used.
if self._colors:
last_color = self._ANSI_REGEXP.findall(line)[-1]
# Update last position (if there was no line break in the end).
if len(lines) > 0:
last_line = lines[-1]
if not last_line.endswith(self.LINE_SEP):
# Strip the colors to figure out the real number of characters in the line.
if self._colors:
for color_code in self._ANSI_REGEXP.findall(last_line):
last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
self._last_position += len(last_line)
else:
self._last_position = 0
self._is_first_line = False
else:
self._last_position = 0
# Reset colors for the next print.
if self._colors and not text.endswith(self.NORMAL):
self._writer.write(self.NORMAL)
def write_line(self, text: str = ''):
"""
Prints a line of text to the screen.
Uses the write method.
:param text: The text to print.
"""
self.write(text + self.LINE_SEP)
def write_aligned(self, key: str, value: str, not_important_keys: Optional[List[str]] = None,
is_list: bool = False, align_size: Optional[int] = None, key_color: str = PURPLE,
value_color: str = GREEN, dark_key_color: str = DARK_PURPLE, dark_value_color: str = DARK_GREEN,
separator: str = SEPARATOR):
"""
Prints keys and values aligned to align_size.
:param key: The name of the property to print.
:param value: The value of the property to print.
:param not_important_keys: Properties that will be printed in a darker color.
:param is_list: True if the value is a list of items.
:param align_size: The alignment size to use.
:param key_color: The key text color (default is purple).
:param value_color: The value text color (default is green).
:param dark_key_color: The key text color for unimportant keys (default is dark purple).
:param dark_value_color: The values text color for unimportant values (default is dark green).
:param separator: The separator to use (default is ':').
"""
align_size = align_size or min(32, get_console_width() // 2)
not_important_keys = not_important_keys or []
if value is None:
return
if isinstance(value, bool):
value = str(value)
if key in not_important_keys:
key_color = dark_key_color
value_color = dark_value_color
self.write(key_color + key + separator)
self.write(' ' * (align_size - len(key) - 1))
with self.group(indent=align_size):
if is_list and len(value) > 0:
self.write_line(value_color + value[0])
if len(value) > 1:
for v in value[1:]:
self.write_line(value_color + v)
elif not is_list:
self.write_line(value_color + str(value))
def write_title(self, title: str, title_color: str = YELLOW, hyphen_line_color: str = WHITE):
"""
Prints title with hyphen line underneath it.
:param title: The title to print.
:param title_color: The title text color (default is yellow).
:param hyphen_line_color: The hyphen line color (default is white).
"""
self.write_line(title_color + title)
self.write_line(hyphen_line_color + '=' * (len(title) + 3))
def __getattr__(self, item: str):
# Support color function in a generic fashion.
if item in self._COLORS_LIST:
def wrapper(text):
# Color function content will be wrapped, and the rest of the text color will be normal.
wrapped_text = getattr(self, item.upper()) + text
# No need to duplicate normal color suffix.
if not wrapped_text.endswith(self.NORMAL):
wrapped_text += self.NORMAL
return wrapped_text
return wrapper
return super().__getattribute__(item)
_printer = None
# Colors won't work on Linux if TERM is not defined.
_colors = os.name == 'nt' or os.getenv('TERM')
# If we're not inside IPython, use pyreadline's console.
if os.name == 'nt' and sys.stdout == sys.__stdout__:
try:
assert __IPYTHON__
except NameError:
try:
from pyreadline.console.console import Console
_printer = Printer(Console())
except ImportError:
# If all failed, just print without colors.
_colors = False
def get_printer(colors: bool = True, width_limit: bool = True, disabled: bool = False) -> Printer:
"""
Returns an already initialized instance of the printer.
:param colors: If False, no colors will be printed.
:param width_limit: If True, printing width will be limited by console width.
:param disabled: If True, nothing will be printed.
"""
global _printer
global _colors
# Make sure we can print colors if needed.
colors = colors and _colors
# If the printer was never defined before, or the settings have changed.
if not _printer or (colors != _printer._colors) or (width_limit != _printer._width_limit):
_printer = Printer(DefaultWriter(disabled=disabled), colors=colors, width_limit=width_limit)
return _printer
def _get_windows_console_width() -> int:
"""
A small utility function for getting the current console window's width, in Windows.
:return: The current console window's width.
"""
from ctypes import byref, windll
import pyreadline
out = windll.kernel32.GetStdHandle(-11)
info = pyreadline.console.CONSOLE_SCREEN_BUFFER_INFO()
windll.kernel32.GetConsoleScreenBufferInfo(out, byref(info))
return info.dwSize.X
def _get_linux_console_width() -> int:
# Don't run tput if TERM is not defined, to prevent terminal-related errors.
if os.getenv('TERM'):
return int(subprocess.check_output(['tput', 'cols']))
return 0
def _in_qtconsole() -> bool:
"""
A small utility function which determines if we're running in QTConsole's context.
"""
try:
from IPython import get_ipython
try:
from ipykernel.zmqshell import ZMQInteractiveShell
shell_object = ZMQInteractiveShell
except ImportError:
from IPython.kernel.zmq import zmqshell
shell_object = zmqshell.ZMQInteractiveShell
return isinstance(get_ipython(), shell_object)
except Exception:
return False
__all__ = ['get_printer', 'get_console_width', 'Printer', 'DefaultWriter']
|
ofir123/py-printer
|
pyprinter/printer.py
|
Printer.group
|
python
|
def group(self, indent: int = DEFAULT_INDENT, add_line: bool = True) -> _TextGroup:
return _TextGroup(self, indent, add_line)
|
Returns a context manager which adds an indentation before each line.
:param indent: Number of spaces to print.
:param add_line: If True, a new line will be printed after the group.
:return: A TextGroup context manager.
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/printer.py#L112-L120
| null |
class Printer:
"""
A user-friendly printer, with auxiliary functions for colors and tabs.
"""
DEFAULT_INDENT = 4
SEPARATOR = ':'
LINE_SEP = '\n'
# ANSI Color codes constants.
_ANSI_COLOR_PREFIX = '\x1b'
_ANSI_REGEXP = re.compile('\x1b\\[(\\d;)?(\\d+)m')
_ANSI_COLOR_CODE = f'{_ANSI_COLOR_PREFIX}[%s%dm'
_DARK_CODE = '0;'
_LIGHT_CODE = '1;'
NORMAL = _ANSI_COLOR_CODE % (_DARK_CODE, 0)
DARK_RED = _ANSI_COLOR_CODE % (_DARK_CODE, 31)
DARK_GREEN = _ANSI_COLOR_CODE % (_DARK_CODE, 32)
DARK_YELLOW = _ANSI_COLOR_CODE % (_DARK_CODE, 33)
DARK_BLUE = _ANSI_COLOR_CODE % (_DARK_CODE, 34)
DARK_PURPLE = _ANSI_COLOR_CODE % (_DARK_CODE, 35)
DARK_CYAN = _ANSI_COLOR_CODE % (_DARK_CODE, 36)
GREY = _ANSI_COLOR_CODE % (_DARK_CODE, 37)
RED = _ANSI_COLOR_CODE % (_LIGHT_CODE, 31)
GREEN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 32)
YELLOW = _ANSI_COLOR_CODE % (_LIGHT_CODE, 33)
BLUE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 34)
PURPLE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 35)
CYAN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 36)
WHITE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 37)
_COLORS_LIST = ['dark_red', 'dark_green', 'dark_yellow', 'dark_blue', 'dark_purple', 'dark_cyan', 'grey', 'red',
'green', 'yellow', 'blue', 'purple', 'cyan', 'white']
_ANSI_COLOR_LENGTH = len(WHITE)
def __init__(self, writer, colors: bool = True, width_limit: bool = True):
"""
Initializes the printer with the given writer.
:param writer: The writer to use (for example - IPythonWriter, or DefaultWriter).
:param colors: If False, no colors will be printed.
:param width_limit: If True, printing width will be limited by console width.
"""
self._writer = writer
self._in_line = False
self._colors = colors
self._width_limit = width_limit
self._last_position = 0
self._is_first_line = False
self._indents = []
self.indents_sum = 0
def _split_lines(self, original_lines: List[str]) -> List[str]:
"""
Splits the original lines list according to the current console width and group indentations.
:param original_lines: The original lines list to split.
:return: A list of the new width-formatted lines.
"""
console_width = get_console_width()
# We take indent into account only in the inner group lines.
max_line_length = console_width - len(self.LINE_SEP) - self._last_position - \
(self.indents_sum if not self._is_first_line else self.indents_sum - self._indents[-1])
lines = []
for i, line in enumerate(original_lines):
fixed_line = []
colors_counter = 0
line_index = 0
while line_index < len(line):
c = line[line_index]
# Check if we're in a color block.
if self._colors and c == self._ANSI_COLOR_PREFIX and \
len(line) >= (line_index + self._ANSI_COLOR_LENGTH):
current_color = line[line_index:line_index + self._ANSI_COLOR_LENGTH]
# If it really is a color, skip it.
if self._ANSI_REGEXP.match(current_color):
line_index += self._ANSI_COLOR_LENGTH
fixed_line.extend(list(current_color))
colors_counter += 1
continue
fixed_line.append(line[line_index])
line_index += 1
# Create a new line, if max line is reached.
if len(fixed_line) >= max_line_length + (colors_counter * self._ANSI_COLOR_LENGTH):
# Special case in which we want to split right before the line break.
if len(line) > line_index and line[line_index] == self.LINE_SEP:
continue
line_string = ''.join(fixed_line)
if not line_string.endswith(self.LINE_SEP):
line_string += self.LINE_SEP
lines.append(line_string)
fixed_line = []
colors_counter = 0
self._last_position = 0
# Max line length has changed since the last position is now 0.
max_line_length = console_width - len(self.LINE_SEP) - self.indents_sum
self._is_first_line = False
if len(fixed_line) > 0:
fixed_line = ''.join(fixed_line)
# If this line contains only color codes, attach it to the last line instead of creating a new one.
if len(fixed_line) == self._ANSI_COLOR_LENGTH and self._ANSI_REGEXP.match(fixed_line) is not None and \
len(lines) > 0:
lines[-1] = lines[-1][:-1] + fixed_line
else:
lines.append(fixed_line)
return lines
def write(self, text: str):
"""
Prints text to the screen.
Supports colors by using the color constants.
To use colors, add the color before the text you want to print.
:param text: The text to print.
"""
# Default color is NORMAL.
last_color = (self._DARK_CODE, 0)
# We use splitlines with keepends in order to keep the line breaks.
# Then we split by using the console width.
original_lines = text.splitlines(True)
lines = self._split_lines(original_lines) if self._width_limit else original_lines
# Print the new width-formatted lines.
for line in lines:
# Print indents only at line beginnings.
if not self._in_line:
self._writer.write(' ' * self.indents_sum)
# Remove colors if needed.
if not self._colors:
for color_code in self._ANSI_REGEXP.findall(line):
line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
elif not self._ANSI_REGEXP.match(line):
# Check if the line starts with a color. If not, we apply the color from the last line.
line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line
# Print the final line.
self._writer.write(line)
# Update the in_line status.
self._in_line = not line.endswith(self.LINE_SEP)
# Update the last color used.
if self._colors:
last_color = self._ANSI_REGEXP.findall(line)[-1]
# Update last position (if there was no line break in the end).
if len(lines) > 0:
last_line = lines[-1]
if not last_line.endswith(self.LINE_SEP):
# Strip the colors to figure out the real number of characters in the line.
if self._colors:
for color_code in self._ANSI_REGEXP.findall(last_line):
last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
self._last_position += len(last_line)
else:
self._last_position = 0
self._is_first_line = False
else:
self._last_position = 0
# Reset colors for the next print.
if self._colors and not text.endswith(self.NORMAL):
self._writer.write(self.NORMAL)
def write_line(self, text: str = ''):
"""
Prints a line of text to the screen.
Uses the write method.
:param text: The text to print.
"""
self.write(text + self.LINE_SEP)
def write_aligned(self, key: str, value: str, not_important_keys: Optional[List[str]] = None,
is_list: bool = False, align_size: Optional[int] = None, key_color: str = PURPLE,
value_color: str = GREEN, dark_key_color: str = DARK_PURPLE, dark_value_color: str = DARK_GREEN,
separator: str = SEPARATOR):
"""
Prints keys and values aligned to align_size.
:param key: The name of the property to print.
:param value: The value of the property to print.
:param not_important_keys: Properties that will be printed in a darker color.
:param is_list: True if the value is a list of items.
:param align_size: The alignment size to use.
:param key_color: The key text color (default is purple).
:param value_color: The value text color (default is green).
:param dark_key_color: The key text color for unimportant keys (default is dark purple).
:param dark_value_color: The values text color for unimportant values (default is dark green).
:param separator: The separator to use (default is ':').
"""
align_size = align_size or min(32, get_console_width() // 2)
not_important_keys = not_important_keys or []
if value is None:
return
if isinstance(value, bool):
value = str(value)
if key in not_important_keys:
key_color = dark_key_color
value_color = dark_value_color
self.write(key_color + key + separator)
self.write(' ' * (align_size - len(key) - 1))
with self.group(indent=align_size):
if is_list and len(value) > 0:
self.write_line(value_color + value[0])
if len(value) > 1:
for v in value[1:]:
self.write_line(value_color + v)
elif not is_list:
self.write_line(value_color + str(value))
def write_title(self, title: str, title_color: str = YELLOW, hyphen_line_color: str = WHITE):
"""
Prints title with hyphen line underneath it.
:param title: The title to print.
:param title_color: The title text color (default is yellow).
:param hyphen_line_color: The hyphen line color (default is white).
"""
self.write_line(title_color + title)
self.write_line(hyphen_line_color + '=' * (len(title) + 3))
def __getattr__(self, item: str):
# Support color function in a generic fashion.
if item in self._COLORS_LIST:
def wrapper(text):
# Color function content will be wrapped, and the rest of the text color will be normal.
wrapped_text = getattr(self, item.upper()) + text
# No need to duplicate normal color suffix.
if not wrapped_text.endswith(self.NORMAL):
wrapped_text += self.NORMAL
return wrapped_text
return wrapper
return super().__getattribute__(item)
|
ofir123/py-printer
|
pyprinter/printer.py
|
Printer._split_lines
|
python
|
def _split_lines(self, original_lines: List[str]) -> List[str]:
console_width = get_console_width()
# We take indent into account only in the inner group lines.
max_line_length = console_width - len(self.LINE_SEP) - self._last_position - \
(self.indents_sum if not self._is_first_line else self.indents_sum - self._indents[-1])
lines = []
for i, line in enumerate(original_lines):
fixed_line = []
colors_counter = 0
line_index = 0
while line_index < len(line):
c = line[line_index]
# Check if we're in a color block.
if self._colors and c == self._ANSI_COLOR_PREFIX and \
len(line) >= (line_index + self._ANSI_COLOR_LENGTH):
current_color = line[line_index:line_index + self._ANSI_COLOR_LENGTH]
# If it really is a color, skip it.
if self._ANSI_REGEXP.match(current_color):
line_index += self._ANSI_COLOR_LENGTH
fixed_line.extend(list(current_color))
colors_counter += 1
continue
fixed_line.append(line[line_index])
line_index += 1
# Create a new line, if max line is reached.
if len(fixed_line) >= max_line_length + (colors_counter * self._ANSI_COLOR_LENGTH):
# Special case in which we want to split right before the line break.
if len(line) > line_index and line[line_index] == self.LINE_SEP:
continue
line_string = ''.join(fixed_line)
if not line_string.endswith(self.LINE_SEP):
line_string += self.LINE_SEP
lines.append(line_string)
fixed_line = []
colors_counter = 0
self._last_position = 0
# Max line length has changed since the last position is now 0.
max_line_length = console_width - len(self.LINE_SEP) - self.indents_sum
self._is_first_line = False
if len(fixed_line) > 0:
fixed_line = ''.join(fixed_line)
# If this line contains only color codes, attach it to the last line instead of creating a new one.
if len(fixed_line) == self._ANSI_COLOR_LENGTH and self._ANSI_REGEXP.match(fixed_line) is not None and \
len(lines) > 0:
lines[-1] = lines[-1][:-1] + fixed_line
else:
lines.append(fixed_line)
return lines
|
Splits the original lines list according to the current console width and group indentations.
:param original_lines: The original lines list to split.
:return: A list of the new width-formatted lines.
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/printer.py#L122-L179
| null |
class Printer:
    """
    A user-friendly printer, with auxiliary functions for colors and tabs.
    """
    # Default number of spaces added by an indentation group.
    DEFAULT_INDENT = 4
    # Separator printed between keys and values in write_aligned.
    SEPARATOR = ':'
    LINE_SEP = '\n'
    # ANSI Color codes constants.
    _ANSI_COLOR_PREFIX = '\x1b'
    # Matches an ANSI SGR color sequence such as '\x1b[1;32m';
    # group 1 is the optional intensity prefix ('0;'/'1;'), group 2 the color number.
    _ANSI_REGEXP = re.compile('\x1b\\[(\\d;)?(\\d+)m')
    _ANSI_COLOR_CODE = f'{_ANSI_COLOR_PREFIX}[%s%dm'
    _DARK_CODE = '0;'
    _LIGHT_CODE = '1;'
    NORMAL = _ANSI_COLOR_CODE % (_DARK_CODE, 0)
    DARK_RED = _ANSI_COLOR_CODE % (_DARK_CODE, 31)
    DARK_GREEN = _ANSI_COLOR_CODE % (_DARK_CODE, 32)
    DARK_YELLOW = _ANSI_COLOR_CODE % (_DARK_CODE, 33)
    DARK_BLUE = _ANSI_COLOR_CODE % (_DARK_CODE, 34)
    DARK_PURPLE = _ANSI_COLOR_CODE % (_DARK_CODE, 35)
    DARK_CYAN = _ANSI_COLOR_CODE % (_DARK_CODE, 36)
    GREY = _ANSI_COLOR_CODE % (_DARK_CODE, 37)
    RED = _ANSI_COLOR_CODE % (_LIGHT_CODE, 31)
    GREEN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 32)
    YELLOW = _ANSI_COLOR_CODE % (_LIGHT_CODE, 33)
    BLUE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 34)
    PURPLE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 35)
    CYAN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 36)
    WHITE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 37)
    # Lowercase names resolved dynamically by __getattr__ into text-wrapping helpers.
    _COLORS_LIST = ['dark_red', 'dark_green', 'dark_yellow', 'dark_blue', 'dark_purple', 'dark_cyan', 'grey', 'red',
                    'green', 'yellow', 'blue', 'purple', 'cyan', 'white']
    # All color constants above share the same character length.
    _ANSI_COLOR_LENGTH = len(WHITE)

    def __init__(self, writer, colors: bool = True, width_limit: bool = True):
        """
        Initializes the printer with the given writer.

        :param writer: The writer to use (for example - IPythonWriter, or DefaultWriter).
        :param colors: If False, no colors will be printed.
        :param width_limit: If True, printing width will be limited by console width.
        """
        self._writer = writer
        # True while the last printed chunk did not end with a line break.
        self._in_line = False
        self._colors = colors
        self._width_limit = width_limit
        # Cursor offset inside the current (unterminated) line.
        self._last_position = 0
        self._is_first_line = False
        # Stack of active group indents; indents_sum caches their total width.
        self._indents = []
        self.indents_sum = 0

    def group(self, indent: int = DEFAULT_INDENT, add_line: bool = True) -> _TextGroup:
        """
        Returns a context manager which adds an indentation before each line.

        :param indent: Number of spaces to print.
        :param add_line: If True, a new line will be printed after the group.
        :return: A TextGroup context manager.
        """
        return _TextGroup(self, indent, add_line)

    def write(self, text: str):
        """
        Prints text to the screen.
        Supports colors by using the color constants.
        To use colors, add the color before the text you want to print.

        :param text: The text to print.
        """
        # Default color is NORMAL.
        last_color = (self._DARK_CODE, 0)
        # We use splitlines with keepends in order to keep the line breaks.
        # Then we split by using the console width.
        original_lines = text.splitlines(True)
        lines = self._split_lines(original_lines) if self._width_limit else original_lines
        # Print the new width-formatted lines.
        for line in lines:
            # Print indents only at line beginnings.
            if not self._in_line:
                self._writer.write(' ' * self.indents_sum)
            # Remove colors if needed.
            if not self._colors:
                for color_code in self._ANSI_REGEXP.findall(line):
                    line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
            elif not self._ANSI_REGEXP.match(line):
                # Check if the line starts with a color. If not, we apply the color from the last line.
                line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line
            # Print the final line.
            self._writer.write(line)
            # Update the in_line status.
            self._in_line = not line.endswith(self.LINE_SEP)
            # Update the last color used.
            if self._colors:
                # findall yields (prefix, number) tuples; at least one exists because a
                # color was prepended above when the line had none.
                last_color = self._ANSI_REGEXP.findall(line)[-1]
        # Update last position (if there was no line break in the end).
        if len(lines) > 0:
            last_line = lines[-1]
            if not last_line.endswith(self.LINE_SEP):
                # Strip the colors to figure out the real number of characters in the line.
                if self._colors:
                    for color_code in self._ANSI_REGEXP.findall(last_line):
                        last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
                self._last_position += len(last_line)
            else:
                self._last_position = 0
                self._is_first_line = False
        else:
            self._last_position = 0
        # Reset colors for the next print.
        if self._colors and not text.endswith(self.NORMAL):
            self._writer.write(self.NORMAL)

    def write_line(self, text: str = ''):
        """
        Prints a line of text to the screen.
        Uses the write method.

        :param text: The text to print.
        """
        self.write(text + self.LINE_SEP)

    def write_aligned(self, key: str, value: str, not_important_keys: Optional[List[str]] = None,
                      is_list: bool = False, align_size: Optional[int] = None, key_color: str = PURPLE,
                      value_color: str = GREEN, dark_key_color: str = DARK_PURPLE, dark_value_color: str = DARK_GREEN,
                      separator: str = SEPARATOR):
        """
        Prints keys and values aligned to align_size.

        :param key: The name of the property to print.
        :param value: The value of the property to print.
        :param not_important_keys: Properties that will be printed in a darker color.
        :param is_list: True if the value is a list of items.
        :param align_size: The alignment size to use.
        :param key_color: The key text color (default is purple).
        :param value_color: The value text color (default is green).
        :param dark_key_color: The key text color for unimportant keys (default is dark purple).
        :param dark_value_color: The values text color for unimportant values (default is dark green).
        :param separator: The separator to use (default is ':').
        """
        # Cap the key column at 32 characters or half the console, whichever is smaller.
        align_size = align_size or min(32, get_console_width() // 2)
        not_important_keys = not_important_keys or []
        if value is None:
            return
        if isinstance(value, bool):
            value = str(value)
        if key in not_important_keys:
            key_color = dark_key_color
            value_color = dark_value_color
        self.write(key_color + key + separator)
        # The '- 1' accounts for the separator character; NOTE(review): assumes a
        # one-character separator — a longer one would shift the alignment.
        self.write(' ' * (align_size - len(key) - 1))
        with self.group(indent=align_size):
            if is_list and len(value) > 0:
                self.write_line(value_color + value[0])
                if len(value) > 1:
                    for v in value[1:]:
                        self.write_line(value_color + v)
            elif not is_list:
                self.write_line(value_color + str(value))

    def write_title(self, title: str, title_color: str = YELLOW, hyphen_line_color: str = WHITE):
        """
        Prints title with hyphen line underneath it.

        :param title: The title to print.
        :param title_color: The title text color (default is yellow).
        :param hyphen_line_color: The hyphen line color (default is white).
        """
        self.write_line(title_color + title)
        # NOTE(review): the underline actually uses '=' (not hyphens) and extends
        # three characters past the title.
        self.write_line(hyphen_line_color + '=' * (len(title) + 3))

    def __getattr__(self, item: str):
        """
        Resolves lowercase color names (e.g. ``printer.red``) into helper functions
        that wrap a given text with that color and restore NORMAL afterwards.
        """
        # Support color function in a generic fashion.
        if item in self._COLORS_LIST:
            def wrapper(text):
                # Color function content will be wrapped, and the rest of the text color will be normal.
                wrapped_text = getattr(self, item.upper()) + text
                # No need to duplicate normal color suffix.
                if not wrapped_text.endswith(self.NORMAL):
                    wrapped_text += self.NORMAL
                return wrapped_text
            return wrapper
        return super().__getattribute__(item)
|
ofir123/py-printer
|
pyprinter/printer.py
|
Printer.write
|
python
|
def write(self, text: str):
# Default color is NORMAL.
last_color = (self._DARK_CODE, 0)
# We use splitlines with keepends in order to keep the line breaks.
# Then we split by using the console width.
original_lines = text.splitlines(True)
lines = self._split_lines(original_lines) if self._width_limit else original_lines
# Print the new width-formatted lines.
for line in lines:
# Print indents only at line beginnings.
if not self._in_line:
self._writer.write(' ' * self.indents_sum)
# Remove colors if needed.
if not self._colors:
for color_code in self._ANSI_REGEXP.findall(line):
line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
elif not self._ANSI_REGEXP.match(line):
# Check if the line starts with a color. If not, we apply the color from the last line.
line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line
# Print the final line.
self._writer.write(line)
# Update the in_line status.
self._in_line = not line.endswith(self.LINE_SEP)
# Update the last color used.
if self._colors:
last_color = self._ANSI_REGEXP.findall(line)[-1]
# Update last position (if there was no line break in the end).
if len(lines) > 0:
last_line = lines[-1]
if not last_line.endswith(self.LINE_SEP):
# Strip the colors to figure out the real number of characters in the line.
if self._colors:
for color_code in self._ANSI_REGEXP.findall(last_line):
last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
self._last_position += len(last_line)
else:
self._last_position = 0
self._is_first_line = False
else:
self._last_position = 0
# Reset colors for the next print.
if self._colors and not text.endswith(self.NORMAL):
self._writer.write(self.NORMAL)
|
Prints text to the screen.
Supports colors by using the color constants.
To use colors, add the color before the text you want to print.
:param text: The text to print.
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/printer.py#L181-L233
|
[
"def _split_lines(self, original_lines: List[str]) -> List[str]:\n \"\"\"\n Splits the original lines list according to the current console width and group indentations.\n\n :param original_lines: The original lines list to split.\n :return: A list of the new width-formatted lines.\n \"\"\"\n console_width = get_console_width()\n # We take indent into account only in the inner group lines.\n max_line_length = console_width - len(self.LINE_SEP) - self._last_position - \\\n (self.indents_sum if not self._is_first_line else self.indents_sum - self._indents[-1])\n\n lines = []\n for i, line in enumerate(original_lines):\n fixed_line = []\n colors_counter = 0\n line_index = 0\n while line_index < len(line):\n c = line[line_index]\n\n # Check if we're in a color block.\n if self._colors and c == self._ANSI_COLOR_PREFIX and \\\n len(line) >= (line_index + self._ANSI_COLOR_LENGTH):\n current_color = line[line_index:line_index + self._ANSI_COLOR_LENGTH]\n # If it really is a color, skip it.\n if self._ANSI_REGEXP.match(current_color):\n line_index += self._ANSI_COLOR_LENGTH\n fixed_line.extend(list(current_color))\n colors_counter += 1\n continue\n fixed_line.append(line[line_index])\n line_index += 1\n\n # Create a new line, if max line is reached.\n if len(fixed_line) >= max_line_length + (colors_counter * self._ANSI_COLOR_LENGTH):\n # Special case in which we want to split right before the line break.\n if len(line) > line_index and line[line_index] == self.LINE_SEP:\n continue\n line_string = ''.join(fixed_line)\n if not line_string.endswith(self.LINE_SEP):\n line_string += self.LINE_SEP\n lines.append(line_string)\n fixed_line = []\n colors_counter = 0\n self._last_position = 0\n # Max line length has changed since the last position is now 0.\n max_line_length = console_width - len(self.LINE_SEP) - self.indents_sum\n self._is_first_line = False\n\n if len(fixed_line) > 0:\n fixed_line = ''.join(fixed_line)\n # If this line contains only color codes, attach it to the 
last line instead of creating a new one.\n if len(fixed_line) == self._ANSI_COLOR_LENGTH and self._ANSI_REGEXP.match(fixed_line) is not None and \\\n len(lines) > 0:\n lines[-1] = lines[-1][:-1] + fixed_line\n else:\n lines.append(fixed_line)\n return lines\n"
] |
class Printer:
    """
    A user-friendly printer, with auxiliary functions for colors and tabs.
    """
    # Default number of spaces added by an indentation group.
    DEFAULT_INDENT = 4
    # Separator printed between keys and values in write_aligned.
    SEPARATOR = ':'
    LINE_SEP = '\n'
    # ANSI Color codes constants.
    _ANSI_COLOR_PREFIX = '\x1b'
    # Matches an ANSI SGR color sequence such as '\x1b[1;32m';
    # group 1 is the optional intensity prefix ('0;'/'1;'), group 2 the color number.
    _ANSI_REGEXP = re.compile('\x1b\\[(\\d;)?(\\d+)m')
    _ANSI_COLOR_CODE = f'{_ANSI_COLOR_PREFIX}[%s%dm'
    _DARK_CODE = '0;'
    _LIGHT_CODE = '1;'
    NORMAL = _ANSI_COLOR_CODE % (_DARK_CODE, 0)
    DARK_RED = _ANSI_COLOR_CODE % (_DARK_CODE, 31)
    DARK_GREEN = _ANSI_COLOR_CODE % (_DARK_CODE, 32)
    DARK_YELLOW = _ANSI_COLOR_CODE % (_DARK_CODE, 33)
    DARK_BLUE = _ANSI_COLOR_CODE % (_DARK_CODE, 34)
    DARK_PURPLE = _ANSI_COLOR_CODE % (_DARK_CODE, 35)
    DARK_CYAN = _ANSI_COLOR_CODE % (_DARK_CODE, 36)
    GREY = _ANSI_COLOR_CODE % (_DARK_CODE, 37)
    RED = _ANSI_COLOR_CODE % (_LIGHT_CODE, 31)
    GREEN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 32)
    YELLOW = _ANSI_COLOR_CODE % (_LIGHT_CODE, 33)
    BLUE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 34)
    PURPLE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 35)
    CYAN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 36)
    WHITE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 37)
    # Lowercase names resolved dynamically by __getattr__ into text-wrapping helpers.
    _COLORS_LIST = ['dark_red', 'dark_green', 'dark_yellow', 'dark_blue', 'dark_purple', 'dark_cyan', 'grey', 'red',
                    'green', 'yellow', 'blue', 'purple', 'cyan', 'white']
    # All color constants above share the same character length.
    _ANSI_COLOR_LENGTH = len(WHITE)

    def __init__(self, writer, colors: bool = True, width_limit: bool = True):
        """
        Initializes the printer with the given writer.

        :param writer: The writer to use (for example - IPythonWriter, or DefaultWriter).
        :param colors: If False, no colors will be printed.
        :param width_limit: If True, printing width will be limited by console width.
        """
        self._writer = writer
        # True while the last printed chunk did not end with a line break.
        self._in_line = False
        self._colors = colors
        self._width_limit = width_limit
        # Cursor offset inside the current (unterminated) line.
        self._last_position = 0
        self._is_first_line = False
        # Stack of active group indents; indents_sum caches their total width.
        self._indents = []
        self.indents_sum = 0

    def group(self, indent: int = DEFAULT_INDENT, add_line: bool = True) -> _TextGroup:
        """
        Returns a context manager which adds an indentation before each line.

        :param indent: Number of spaces to print.
        :param add_line: If True, a new line will be printed after the group.
        :return: A TextGroup context manager.
        """
        return _TextGroup(self, indent, add_line)

    def _split_lines(self, original_lines: List[str]) -> List[str]:
        """
        Splits the original lines list according to the current console width and group indentations.

        :param original_lines: The original lines list to split.
        :return: A list of the new width-formatted lines.
        """
        console_width = get_console_width()
        # We take indent into account only in the inner group lines.
        # NOTE(review): when _is_first_line is True this reads self._indents[-1] and
        # would raise IndexError with an empty stack — presumably _TextGroup
        # guarantees a pushed indent in that state; verify against _TextGroup.
        max_line_length = console_width - len(self.LINE_SEP) - self._last_position - \
                          (self.indents_sum if not self._is_first_line else self.indents_sum - self._indents[-1])
        lines = []
        for i, line in enumerate(original_lines):
            # Characters accumulated for the current output line (including color codes).
            fixed_line = []
            # Number of ANSI sequences inside fixed_line; they take up no console width.
            colors_counter = 0
            line_index = 0
            while line_index < len(line):
                c = line[line_index]
                # Check if we're in a color block.
                if self._colors and c == self._ANSI_COLOR_PREFIX and \
                        len(line) >= (line_index + self._ANSI_COLOR_LENGTH):
                    current_color = line[line_index:line_index + self._ANSI_COLOR_LENGTH]
                    # If it really is a color, skip it.
                    if self._ANSI_REGEXP.match(current_color):
                        line_index += self._ANSI_COLOR_LENGTH
                        fixed_line.extend(list(current_color))
                        colors_counter += 1
                        continue
                fixed_line.append(line[line_index])
                line_index += 1
                # Create a new line, if max line is reached.
                # Color codes are excluded from the width by adding their total length back.
                if len(fixed_line) >= max_line_length + (colors_counter * self._ANSI_COLOR_LENGTH):
                    # Special case in which we want to split right before the line break.
                    if len(line) > line_index and line[line_index] == self.LINE_SEP:
                        continue
                    line_string = ''.join(fixed_line)
                    if not line_string.endswith(self.LINE_SEP):
                        line_string += self.LINE_SEP
                    lines.append(line_string)
                    fixed_line = []
                    colors_counter = 0
                    self._last_position = 0
                    # Max line length has changed since the last position is now 0.
                    max_line_length = console_width - len(self.LINE_SEP) - self.indents_sum
                    self._is_first_line = False
            if len(fixed_line) > 0:
                fixed_line = ''.join(fixed_line)
                # If this line contains only color codes, attach it to the last line instead of creating a new one.
                if len(fixed_line) == self._ANSI_COLOR_LENGTH and self._ANSI_REGEXP.match(fixed_line) is not None and \
                        len(lines) > 0:
                    # Splice the color in before the previous line's trailing line break.
                    lines[-1] = lines[-1][:-1] + fixed_line
                else:
                    lines.append(fixed_line)
        return lines

    def write_line(self, text: str = ''):
        """
        Prints a line of text to the screen.
        Uses the write method.

        :param text: The text to print.
        """
        self.write(text + self.LINE_SEP)

    def write_aligned(self, key: str, value: str, not_important_keys: Optional[List[str]] = None,
                      is_list: bool = False, align_size: Optional[int] = None, key_color: str = PURPLE,
                      value_color: str = GREEN, dark_key_color: str = DARK_PURPLE, dark_value_color: str = DARK_GREEN,
                      separator: str = SEPARATOR):
        """
        Prints keys and values aligned to align_size.

        :param key: The name of the property to print.
        :param value: The value of the property to print.
        :param not_important_keys: Properties that will be printed in a darker color.
        :param is_list: True if the value is a list of items.
        :param align_size: The alignment size to use.
        :param key_color: The key text color (default is purple).
        :param value_color: The value text color (default is green).
        :param dark_key_color: The key text color for unimportant keys (default is dark purple).
        :param dark_value_color: The values text color for unimportant values (default is dark green).
        :param separator: The separator to use (default is ':').
        """
        # Cap the key column at 32 characters or half the console, whichever is smaller.
        align_size = align_size or min(32, get_console_width() // 2)
        not_important_keys = not_important_keys or []
        if value is None:
            return
        if isinstance(value, bool):
            value = str(value)
        if key in not_important_keys:
            key_color = dark_key_color
            value_color = dark_value_color
        self.write(key_color + key + separator)
        # The '- 1' accounts for the separator character; NOTE(review): assumes a
        # one-character separator — a longer one would shift the alignment.
        self.write(' ' * (align_size - len(key) - 1))
        with self.group(indent=align_size):
            if is_list and len(value) > 0:
                self.write_line(value_color + value[0])
                if len(value) > 1:
                    for v in value[1:]:
                        self.write_line(value_color + v)
            elif not is_list:
                self.write_line(value_color + str(value))

    def write_title(self, title: str, title_color: str = YELLOW, hyphen_line_color: str = WHITE):
        """
        Prints title with hyphen line underneath it.

        :param title: The title to print.
        :param title_color: The title text color (default is yellow).
        :param hyphen_line_color: The hyphen line color (default is white).
        """
        self.write_line(title_color + title)
        # NOTE(review): the underline actually uses '=' (not hyphens) and extends
        # three characters past the title.
        self.write_line(hyphen_line_color + '=' * (len(title) + 3))

    def __getattr__(self, item: str):
        """
        Resolves lowercase color names (e.g. ``printer.red``) into helper functions
        that wrap a given text with that color and restore NORMAL afterwards.
        """
        # Support color function in a generic fashion.
        if item in self._COLORS_LIST:
            def wrapper(text):
                # Color function content will be wrapped, and the rest of the text color will be normal.
                wrapped_text = getattr(self, item.upper()) + text
                # No need to duplicate normal color suffix.
                if not wrapped_text.endswith(self.NORMAL):
                    wrapped_text += self.NORMAL
                return wrapped_text
            return wrapper
        return super().__getattribute__(item)
|
ofir123/py-printer
|
pyprinter/printer.py
|
Printer.write_aligned
|
python
|
def write_aligned(self, key: str, value: str, not_important_keys: Optional[List[str]] = None,
is_list: bool = False, align_size: Optional[int] = None, key_color: str = PURPLE,
value_color: str = GREEN, dark_key_color: str = DARK_PURPLE, dark_value_color: str = DARK_GREEN,
separator: str = SEPARATOR):
align_size = align_size or min(32, get_console_width() // 2)
not_important_keys = not_important_keys or []
if value is None:
return
if isinstance(value, bool):
value = str(value)
if key in not_important_keys:
key_color = dark_key_color
value_color = dark_value_color
self.write(key_color + key + separator)
self.write(' ' * (align_size - len(key) - 1))
with self.group(indent=align_size):
if is_list and len(value) > 0:
self.write_line(value_color + value[0])
if len(value) > 1:
for v in value[1:]:
self.write_line(value_color + v)
elif not is_list:
self.write_line(value_color + str(value))
|
Prints keys and values aligned to align_size.
:param key: The name of the property to print.
:param value: The value of the property to print.
:param not_important_keys: Properties that will be printed in a darker color.
:param is_list: True if the value is a list of items.
:param align_size: The alignment size to use.
:param key_color: The key text color (default is purple).
:param value_color: The value text color (default is green).
:param dark_key_color: The key text color for unimportant keys (default is dark purple).
:param dark_value_color: The values text color for unimportant values (default is dark green).
:param separator: The separator to use (default is ':').
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/printer.py#L244-L281
|
[
"def get_console_width() -> int:\n \"\"\"\n A small utility function for getting the current console window's width.\n\n :return: The current console window's width.\n \"\"\"\n # Assigning the value once, as frequent call to this function\n # causes a major slow down(ImportErrors + isinstance).\n global _IN_QT\n if _IN_QT is None:\n _IN_QT = _in_qtconsole()\n\n try:\n if _IN_QT:\n # QTConsole determines and handles the max line length by itself.\n width = sys.maxsize\n else:\n width = _get_windows_console_width() if os.name == 'nt' else _get_linux_console_width()\n if width <= 0:\n return 80\n return width\n except Exception:\n # Default value.\n return 80\n",
"def group(self, indent: int = DEFAULT_INDENT, add_line: bool = True) -> _TextGroup:\n \"\"\"\n Returns a context manager which adds an indentation before each line.\n\n :param indent: Number of spaces to print.\n :param add_line: If True, a new line will be printed after the group.\n :return: A TextGroup context manager.\n \"\"\"\n return _TextGroup(self, indent, add_line)\n",
"def write(self, text: str):\n \"\"\"\n Prints text to the screen.\n Supports colors by using the color constants.\n To use colors, add the color before the text you want to print.\n\n :param text: The text to print.\n \"\"\"\n # Default color is NORMAL.\n last_color = (self._DARK_CODE, 0)\n # We use splitlines with keepends in order to keep the line breaks.\n # Then we split by using the console width.\n original_lines = text.splitlines(True)\n lines = self._split_lines(original_lines) if self._width_limit else original_lines\n\n # Print the new width-formatted lines.\n for line in lines:\n # Print indents only at line beginnings.\n if not self._in_line:\n self._writer.write(' ' * self.indents_sum)\n # Remove colors if needed.\n if not self._colors:\n for color_code in self._ANSI_REGEXP.findall(line):\n line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')\n elif not self._ANSI_REGEXP.match(line):\n # Check if the line starts with a color. If not, we apply the color from the last line.\n line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line\n # Print the final line.\n self._writer.write(line)\n # Update the in_line status.\n self._in_line = not line.endswith(self.LINE_SEP)\n # Update the last color used.\n if self._colors:\n last_color = self._ANSI_REGEXP.findall(line)[-1]\n\n # Update last position (if there was no line break in the end).\n if len(lines) > 0:\n last_line = lines[-1]\n if not last_line.endswith(self.LINE_SEP):\n # Strip the colors to figure out the real number of characters in the line.\n if self._colors:\n for color_code in self._ANSI_REGEXP.findall(last_line):\n last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')\n self._last_position += len(last_line)\n else:\n self._last_position = 0\n self._is_first_line = False\n else:\n self._last_position = 0\n\n # Reset colors for the next print.\n if self._colors and not text.endswith(self.NORMAL):\n 
self._writer.write(self.NORMAL)\n",
"def write_line(self, text: str = ''):\n \"\"\"\n Prints a line of text to the screen.\n Uses the write method.\n\n :param text: The text to print.\n \"\"\"\n self.write(text + self.LINE_SEP)\n"
] |
class Printer:
    """
    A user-friendly printer, with auxiliary functions for colors and tabs.
    """
    # Default number of spaces added by an indentation group.
    DEFAULT_INDENT = 4
    # Separator printed between keys and values by aligned-output helpers.
    SEPARATOR = ':'
    LINE_SEP = '\n'
    # ANSI Color codes constants.
    _ANSI_COLOR_PREFIX = '\x1b'
    # Matches an ANSI SGR color sequence such as '\x1b[1;32m';
    # group 1 is the optional intensity prefix ('0;'/'1;'), group 2 the color number.
    _ANSI_REGEXP = re.compile('\x1b\\[(\\d;)?(\\d+)m')
    _ANSI_COLOR_CODE = f'{_ANSI_COLOR_PREFIX}[%s%dm'
    _DARK_CODE = '0;'
    _LIGHT_CODE = '1;'
    NORMAL = _ANSI_COLOR_CODE % (_DARK_CODE, 0)
    DARK_RED = _ANSI_COLOR_CODE % (_DARK_CODE, 31)
    DARK_GREEN = _ANSI_COLOR_CODE % (_DARK_CODE, 32)
    DARK_YELLOW = _ANSI_COLOR_CODE % (_DARK_CODE, 33)
    DARK_BLUE = _ANSI_COLOR_CODE % (_DARK_CODE, 34)
    DARK_PURPLE = _ANSI_COLOR_CODE % (_DARK_CODE, 35)
    DARK_CYAN = _ANSI_COLOR_CODE % (_DARK_CODE, 36)
    GREY = _ANSI_COLOR_CODE % (_DARK_CODE, 37)
    RED = _ANSI_COLOR_CODE % (_LIGHT_CODE, 31)
    GREEN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 32)
    YELLOW = _ANSI_COLOR_CODE % (_LIGHT_CODE, 33)
    BLUE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 34)
    PURPLE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 35)
    CYAN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 36)
    WHITE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 37)
    # Lowercase names resolved dynamically by __getattr__ into text-wrapping helpers.
    _COLORS_LIST = ['dark_red', 'dark_green', 'dark_yellow', 'dark_blue', 'dark_purple', 'dark_cyan', 'grey', 'red',
                    'green', 'yellow', 'blue', 'purple', 'cyan', 'white']
    # All color constants above share the same character length.
    _ANSI_COLOR_LENGTH = len(WHITE)

    def __init__(self, writer, colors: bool = True, width_limit: bool = True):
        """
        Initializes the printer with the given writer.

        :param writer: The writer to use (for example - IPythonWriter, or DefaultWriter).
        :param colors: If False, no colors will be printed.
        :param width_limit: If True, printing width will be limited by console width.
        """
        self._writer = writer
        # True while the last printed chunk did not end with a line break.
        self._in_line = False
        self._colors = colors
        self._width_limit = width_limit
        # Cursor offset inside the current (unterminated) line.
        self._last_position = 0
        self._is_first_line = False
        # Stack of active group indents; indents_sum caches their total width.
        self._indents = []
        self.indents_sum = 0

    def group(self, indent: int = DEFAULT_INDENT, add_line: bool = True) -> _TextGroup:
        """
        Returns a context manager which adds an indentation before each line.

        :param indent: Number of spaces to print.
        :param add_line: If True, a new line will be printed after the group.
        :return: A TextGroup context manager.
        """
        return _TextGroup(self, indent, add_line)

    def _split_lines(self, original_lines: List[str]) -> List[str]:
        """
        Splits the original lines list according to the current console width and group indentations.

        :param original_lines: The original lines list to split.
        :return: A list of the new width-formatted lines.
        """
        console_width = get_console_width()
        # We take indent into account only in the inner group lines.
        # NOTE(review): when _is_first_line is True this reads self._indents[-1] and
        # would raise IndexError with an empty stack — presumably _TextGroup
        # guarantees a pushed indent in that state; verify against _TextGroup.
        max_line_length = console_width - len(self.LINE_SEP) - self._last_position - \
                          (self.indents_sum if not self._is_first_line else self.indents_sum - self._indents[-1])
        lines = []
        for i, line in enumerate(original_lines):
            # Characters accumulated for the current output line (including color codes).
            fixed_line = []
            # Number of ANSI sequences inside fixed_line; they take up no console width.
            colors_counter = 0
            line_index = 0
            while line_index < len(line):
                c = line[line_index]
                # Check if we're in a color block.
                if self._colors and c == self._ANSI_COLOR_PREFIX and \
                        len(line) >= (line_index + self._ANSI_COLOR_LENGTH):
                    current_color = line[line_index:line_index + self._ANSI_COLOR_LENGTH]
                    # If it really is a color, skip it.
                    if self._ANSI_REGEXP.match(current_color):
                        line_index += self._ANSI_COLOR_LENGTH
                        fixed_line.extend(list(current_color))
                        colors_counter += 1
                        continue
                fixed_line.append(line[line_index])
                line_index += 1
                # Create a new line, if max line is reached.
                # Color codes are excluded from the width by adding their total length back.
                if len(fixed_line) >= max_line_length + (colors_counter * self._ANSI_COLOR_LENGTH):
                    # Special case in which we want to split right before the line break.
                    if len(line) > line_index and line[line_index] == self.LINE_SEP:
                        continue
                    line_string = ''.join(fixed_line)
                    if not line_string.endswith(self.LINE_SEP):
                        line_string += self.LINE_SEP
                    lines.append(line_string)
                    fixed_line = []
                    colors_counter = 0
                    self._last_position = 0
                    # Max line length has changed since the last position is now 0.
                    max_line_length = console_width - len(self.LINE_SEP) - self.indents_sum
                    self._is_first_line = False
            if len(fixed_line) > 0:
                fixed_line = ''.join(fixed_line)
                # If this line contains only color codes, attach it to the last line instead of creating a new one.
                if len(fixed_line) == self._ANSI_COLOR_LENGTH and self._ANSI_REGEXP.match(fixed_line) is not None and \
                        len(lines) > 0:
                    # Splice the color in before the previous line's trailing line break.
                    lines[-1] = lines[-1][:-1] + fixed_line
                else:
                    lines.append(fixed_line)
        return lines

    def write(self, text: str):
        """
        Prints text to the screen.
        Supports colors by using the color constants.
        To use colors, add the color before the text you want to print.

        :param text: The text to print.
        """
        # Default color is NORMAL.
        last_color = (self._DARK_CODE, 0)
        # We use splitlines with keepends in order to keep the line breaks.
        # Then we split by using the console width.
        original_lines = text.splitlines(True)
        lines = self._split_lines(original_lines) if self._width_limit else original_lines
        # Print the new width-formatted lines.
        for line in lines:
            # Print indents only at line beginnings.
            if not self._in_line:
                self._writer.write(' ' * self.indents_sum)
            # Remove colors if needed.
            if not self._colors:
                for color_code in self._ANSI_REGEXP.findall(line):
                    line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
            elif not self._ANSI_REGEXP.match(line):
                # Check if the line starts with a color. If not, we apply the color from the last line.
                line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line
            # Print the final line.
            self._writer.write(line)
            # Update the in_line status.
            self._in_line = not line.endswith(self.LINE_SEP)
            # Update the last color used.
            if self._colors:
                # findall yields (prefix, number) tuples; at least one exists because a
                # color was prepended above when the line had none.
                last_color = self._ANSI_REGEXP.findall(line)[-1]
        # Update last position (if there was no line break in the end).
        if len(lines) > 0:
            last_line = lines[-1]
            if not last_line.endswith(self.LINE_SEP):
                # Strip the colors to figure out the real number of characters in the line.
                if self._colors:
                    for color_code in self._ANSI_REGEXP.findall(last_line):
                        last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
                self._last_position += len(last_line)
            else:
                self._last_position = 0
                self._is_first_line = False
        else:
            self._last_position = 0
        # Reset colors for the next print.
        if self._colors and not text.endswith(self.NORMAL):
            self._writer.write(self.NORMAL)

    def write_line(self, text: str = ''):
        """
        Prints a line of text to the screen.
        Uses the write method.

        :param text: The text to print.
        """
        self.write(text + self.LINE_SEP)

    def write_title(self, title: str, title_color: str = YELLOW, hyphen_line_color: str = WHITE):
        """
        Prints title with hyphen line underneath it.

        :param title: The title to print.
        :param title_color: The title text color (default is yellow).
        :param hyphen_line_color: The hyphen line color (default is white).
        """
        self.write_line(title_color + title)
        # NOTE(review): the underline actually uses '=' (not hyphens) and extends
        # three characters past the title.
        self.write_line(hyphen_line_color + '=' * (len(title) + 3))

    def __getattr__(self, item: str):
        """
        Resolves lowercase color names (e.g. ``printer.red``) into helper functions
        that wrap a given text with that color and restore NORMAL afterwards.
        """
        # Support color function in a generic fashion.
        if item in self._COLORS_LIST:
            def wrapper(text):
                # Color function content will be wrapped, and the rest of the text color will be normal.
                wrapped_text = getattr(self, item.upper()) + text
                # No need to duplicate normal color suffix.
                if not wrapped_text.endswith(self.NORMAL):
                    wrapped_text += self.NORMAL
                return wrapped_text
            return wrapper
        return super().__getattribute__(item)
|
ofir123/py-printer
|
pyprinter/printer.py
|
Printer.write_title
|
python
|
def write_title(self, title: str, title_color: str = YELLOW, hyphen_line_color: str = WHITE):
self.write_line(title_color + title)
self.write_line(hyphen_line_color + '=' * (len(title) + 3))
|
Prints title with hyphen line underneath it.
:param title: The title to print.
:param title_color: The title text color (default is yellow).
:param hyphen_line_color: The hyphen line color (default is white).
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/printer.py#L283-L292
|
[
"def write_line(self, text: str = ''):\n \"\"\"\n Prints a line of text to the screen.\n Uses the write method.\n\n :param text: The text to print.\n \"\"\"\n self.write(text + self.LINE_SEP)\n"
] |
class Printer:
"""
A user-friendly printer, with auxiliary functions for colors and tabs.
"""
DEFAULT_INDENT = 4
SEPARATOR = ':'
LINE_SEP = '\n'
# ANSI Color codes constants.
_ANSI_COLOR_PREFIX = '\x1b'
_ANSI_REGEXP = re.compile('\x1b\\[(\\d;)?(\\d+)m')
_ANSI_COLOR_CODE = f'{_ANSI_COLOR_PREFIX}[%s%dm'
_DARK_CODE = '0;'
_LIGHT_CODE = '1;'
NORMAL = _ANSI_COLOR_CODE % (_DARK_CODE, 0)
DARK_RED = _ANSI_COLOR_CODE % (_DARK_CODE, 31)
DARK_GREEN = _ANSI_COLOR_CODE % (_DARK_CODE, 32)
DARK_YELLOW = _ANSI_COLOR_CODE % (_DARK_CODE, 33)
DARK_BLUE = _ANSI_COLOR_CODE % (_DARK_CODE, 34)
DARK_PURPLE = _ANSI_COLOR_CODE % (_DARK_CODE, 35)
DARK_CYAN = _ANSI_COLOR_CODE % (_DARK_CODE, 36)
GREY = _ANSI_COLOR_CODE % (_DARK_CODE, 37)
RED = _ANSI_COLOR_CODE % (_LIGHT_CODE, 31)
GREEN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 32)
YELLOW = _ANSI_COLOR_CODE % (_LIGHT_CODE, 33)
BLUE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 34)
PURPLE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 35)
CYAN = _ANSI_COLOR_CODE % (_LIGHT_CODE, 36)
WHITE = _ANSI_COLOR_CODE % (_LIGHT_CODE, 37)
_COLORS_LIST = ['dark_red', 'dark_green', 'dark_yellow', 'dark_blue', 'dark_purple', 'dark_cyan', 'grey', 'red',
'green', 'yellow', 'blue', 'purple', 'cyan', 'white']
_ANSI_COLOR_LENGTH = len(WHITE)
def __init__(self, writer, colors: bool = True, width_limit: bool = True):
"""
Initializes the printer with the given writer.
:param writer: The writer to use (for example - IPythonWriter, or DefaultWriter).
:param colors: If False, no colors will be printed.
:param width_limit: If True, printing width will be limited by console width.
"""
self._writer = writer
self._in_line = False
self._colors = colors
self._width_limit = width_limit
self._last_position = 0
self._is_first_line = False
self._indents = []
self.indents_sum = 0
def group(self, indent: int = DEFAULT_INDENT, add_line: bool = True) -> _TextGroup:
"""
Returns a context manager which adds an indentation before each line.
:param indent: Number of spaces to print.
:param add_line: If True, a new line will be printed after the group.
:return: A TextGroup context manager.
"""
return _TextGroup(self, indent, add_line)
def _split_lines(self, original_lines: List[str]) -> List[str]:
    """
    Splits the original lines list according to the current console width and group indentations.

    ANSI color escapes are carried along but excluded from the visible-width
    count, so colored text wraps at the same column as plain text.
    Mutates self._last_position and self._is_first_line as lines are cut.

    :param original_lines: The original lines list to split.
    :return: A list of the new width-formatted lines.
    """
    console_width = get_console_width()
    # We take indent into account only in the inner group lines.
    # NOTE(review): when _is_first_line is True this reads _indents[-1],
    # so it assumes at least one open group in that state - TODO confirm.
    max_line_length = console_width - len(self.LINE_SEP) - self._last_position - \
        (self.indents_sum if not self._is_first_line else self.indents_sum - self._indents[-1])
    lines = []
    for i, line in enumerate(original_lines):
        # Characters accumulated for the current output line.
        fixed_line = []
        # Number of ANSI escapes stored in fixed_line (invisible width).
        colors_counter = 0
        line_index = 0
        while line_index < len(line):
            c = line[line_index]
            # Check if we're in a color block.
            if self._colors and c == self._ANSI_COLOR_PREFIX and \
                    len(line) >= (line_index + self._ANSI_COLOR_LENGTH):
                current_color = line[line_index:line_index + self._ANSI_COLOR_LENGTH]
                # If it really is a color, skip it.
                if self._ANSI_REGEXP.match(current_color):
                    line_index += self._ANSI_COLOR_LENGTH
                    # Keep the escape characters but count them separately
                    # so they don't consume visible width below.
                    fixed_line.extend(list(current_color))
                    colors_counter += 1
                    continue
            fixed_line.append(line[line_index])
            line_index += 1
            # Create a new line, if max line is reached.
            # (colors_counter * _ANSI_COLOR_LENGTH compensates for the
            # invisible escape characters stored in fixed_line.)
            if len(fixed_line) >= max_line_length + (colors_counter * self._ANSI_COLOR_LENGTH):
                # Special case in which we want to split right before the line break.
                if len(line) > line_index and line[line_index] == self.LINE_SEP:
                    continue
                line_string = ''.join(fixed_line)
                if not line_string.endswith(self.LINE_SEP):
                    line_string += self.LINE_SEP
                lines.append(line_string)
                fixed_line = []
                colors_counter = 0
                self._last_position = 0
                # Max line length has changed since the last position is now 0.
                max_line_length = console_width - len(self.LINE_SEP) - self.indents_sum
                self._is_first_line = False
        # Flush whatever remains of the current input line.
        if len(fixed_line) > 0:
            fixed_line = ''.join(fixed_line)
            # If this line contains only color codes, attach it to the last line instead of creating a new one.
            # NOTE(review): this drops the previous line's trailing
            # separator when attaching the color - verify intended.
            if len(fixed_line) == self._ANSI_COLOR_LENGTH and self._ANSI_REGEXP.match(fixed_line) is not None and \
                    len(lines) > 0:
                lines[-1] = lines[-1][:-1] + fixed_line
            else:
                lines.append(fixed_line)
    return lines
def write(self, text: str):
    """
    Prints text to the screen.
    Supports colors by using the color constants.
    To use colors, add the color before the text you want to print.

    :param text: The text to print.
    """
    # Default color is NORMAL. The tuple mirrors _ANSI_REGEXP's groups:
    # (intensity prefix, color number).
    last_color = (self._DARK_CODE, 0)
    # We use splitlines with keepends in order to keep the line breaks.
    # Then we split by using the console width.
    original_lines = text.splitlines(True)
    lines = self._split_lines(original_lines) if self._width_limit else original_lines
    # Print the new width-formatted lines.
    for line in lines:
        # Print indents only at line beginnings.
        if not self._in_line:
            self._writer.write(' ' * self.indents_sum)
        # Remove colors if needed.
        if not self._colors:
            for color_code in self._ANSI_REGEXP.findall(line):
                line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
        elif not self._ANSI_REGEXP.match(line):
            # Check if the line starts with a color. If not, we apply the color from the last line.
            line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line
        # Print the final line.
        self._writer.write(line)
        # Update the in_line status.
        self._in_line = not line.endswith(self.LINE_SEP)
        # Update the last color used.
        # ([-1] is safe here: the line either started with a color or had
        # last_color prepended above, so findall returns at least one match.)
        if self._colors:
            last_color = self._ANSI_REGEXP.findall(line)[-1]
    # Update last position (if there was no line break in the end).
    if len(lines) > 0:
        last_line = lines[-1]
        if not last_line.endswith(self.LINE_SEP):
            # Strip the colors to figure out the real number of characters in the line.
            if self._colors:
                for color_code in self._ANSI_REGEXP.findall(last_line):
                    last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
            self._last_position += len(last_line)
        else:
            self._last_position = 0
            self._is_first_line = False
    else:
        self._last_position = 0
    # Reset colors for the next print.
    if self._colors and not text.endswith(self.NORMAL):
        self._writer.write(self.NORMAL)
def write_line(self, text: str = ''):
    """
    Writes the given text followed by a line separator.

    Delegates to write(), so colors and width limiting still apply.

    :param text: The text to print (defaults to an empty line).
    """
    self.write(f'{text}{self.LINE_SEP}')
def write_aligned(self, key: str, value: str, not_important_keys: Optional[List[str]] = None,
                  is_list: bool = False, align_size: Optional[int] = None, key_color: str = PURPLE,
                  value_color: str = GREEN, dark_key_color: str = DARK_PURPLE, dark_value_color: str = DARK_GREEN,
                  separator: str = SEPARATOR):
    """
    Prints keys and values aligned to align_size.

    Does nothing when value is None. List values are printed one item per
    line, all starting at the same alignment column.

    :param key: The name of the property to print.
    :param value: The value of the property to print.
    :param not_important_keys: Properties that will be printed in a darker color.
    :param is_list: True if the value is a list of items.
    :param align_size: The alignment size to use.
    :param key_color: The key text color (default is purple).
    :param value_color: The value text color (default is green).
    :param dark_key_color: The key text color for unimportant keys (default is dark purple).
    :param dark_value_color: The values text color for unimportant values (default is dark green).
    :param separator: The separator to use (default is ':').
    """
    # Default alignment: half the console width, capped at 32 columns.
    align_size = align_size or min(32, get_console_width() // 2)
    not_important_keys = not_important_keys or []
    if value is None:
        return
    # Booleans are stringified so they print as 'True'/'False'.
    if isinstance(value, bool):
        value = str(value)
    # Demote unimportant keys to the darker color pair.
    if key in not_important_keys:
        key_color = dark_key_color
        value_color = dark_value_color
    self.write(key_color + key + separator)
    # Pad so values start at align_size; the '-1' accounts for the
    # separator (assumes a one-character separator - TODO confirm).
    self.write(' ' * (align_size - len(key) - 1))
    # Group keeps wrapped value lines aligned under the first one.
    with self.group(indent=align_size):
        if is_list and len(value) > 0:
            self.write_line(value_color + value[0])
            if len(value) > 1:
                for v in value[1:]:
                    self.write_line(value_color + v)
        elif not is_list:
            self.write_line(value_color + str(value))
def __getattr__(self, item: str):
    """
    Resolves the dynamic color helper functions.

    For every name in _COLORS_LIST (e.g. 'red', 'dark_cyan') this returns
    a function that prefixes its text argument with the matching ANSI code
    (the upper-cased class constant) and appends a NORMAL reset. Any other
    missing attribute falls back to the default lookup.
    """
    # Support color function in a generic fashion.
    if item in self._COLORS_LIST:
        def wrapper(text):
            # Color function content will be wrapped, and the rest of the text color will be normal.
            wrapped_text = getattr(self, item.upper()) + text
            # No need to duplicate normal color suffix.
            if not wrapped_text.endswith(self.NORMAL):
                wrapped_text += self.NORMAL
            return wrapped_text
        return wrapper
    return super().__getattribute__(item)
|
ofir123/py-printer
|
pyprinter/table.py
|
Table.pretty_print
|
python
|
def pretty_print(self, printer: Optional[Printer] = None, align: int = ALIGN_CENTER, border: bool = False):
if printer is None:
printer = get_printer()
table_string = self._get_pretty_table(indent=printer.indents_sum, align=align, border=border).get_string()
if table_string != '':
first_line = table_string.splitlines()[0]
first_line_length = len(first_line) - len(re.findall(Printer._ANSI_REGEXP, first_line)) * \
Printer._ANSI_COLOR_LENGTH
if self.title_align == self.ALIGN_CENTER:
title = '{}{}'.format(' ' * (first_line_length // 2 - len(self.title) // 2), self.title)
elif self.title_align == self.ALIGN_LEFT:
title = self.title
else:
title = '{}{}'.format(' ' * (first_line_length - len(self.title)), self.title)
printer.write_line(printer.YELLOW + title)
# We split the table to lines in order to keep the indentation.
printer.write_line(table_string)
|
Pretty prints the table.
:param printer: The printer to print with.
:param align: The alignment of the cells(Table.ALIGN_CENTER/ALIGN_LEFT/ALIGN_RIGHT)
:param border: Whether to add a border around the table
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/table.py#L44-L67
|
[
"def get_printer(colors: bool = True, width_limit: bool = True, disabled: bool = False) -> Printer:\n \"\"\"\n Returns an already initialized instance of the printer.\n\n :param colors: If False, no colors will be printed.\n :param width_limit: If True, printing width will be limited by console width.\n :param disabled: If True, nothing will be printed.\n \"\"\"\n global _printer\n global _colors\n # Make sure we can print colors if needed.\n colors = colors and _colors\n # If the printer was never defined before, or the settings have changed.\n if not _printer or (colors != _printer._colors) or (width_limit != _printer._width_limit):\n _printer = Printer(DefaultWriter(disabled=disabled), colors=colors, width_limit=width_limit)\n return _printer\n",
"def get_string(self, **kwargs):\n\n \"\"\"Return string representation of table in current state.\n\n Arguments:\n\n start - index of first data row to include in output\n end - index of last data row to include in output PLUS ONE (list slice style)\n fields - names of fields (columns) to include\n header - print a header showing field names (True or False)\n border - print a border around the table (True or False)\n hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE\n vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE\n int_format - controls formatting of integer data\n float_format - controls formatting of floating point data\n padding_width - number of spaces on either side of column data (only used if left and right paddings are None)\n left_padding_width - number of spaces on left hand side of column data\n right_padding_width - number of spaces on right hand side of column data\n vertical_char - single character string used to draw vertical lines\n horizontal_char - single character string used to draw horizontal lines\n junction_char - single character string used to draw line junctions\n sortby - name of field to sort rows by\n sort_key - sorting key function, applied to data points before sorting\n reversesort - True or False to sort in descending or ascending order\n print empty - if True, stringify just the header for an empty table, if False return an empty string \"\"\"\n\n options = self._get_options(kwargs)\n\n lines = []\n\n # Don't think too hard about an empty table\n # Is this the desired behaviour? 
Maybe we should still print the header?\n if self.rowcount == 0 and (not options[\"print_empty\"] or not options[\"border\"]):\n return \"\"\n\n # Get the rows we need to print, taking into account slicing, sorting, etc.\n rows = self._get_rows(options)\n\n # Turn all data in all rows into Unicode, formatted as desired\n formatted_rows = self._format_rows(rows, options)\n\n # Compute column widths\n self._compute_widths(formatted_rows, options)\n\n # Add header or top of border\n self._hrule = self._stringify_hrule(options)\n if options[\"header\"]:\n lines.append(self._stringify_header(options))\n elif options[\"border\"] and options[\"hrules\"] in (ALL, FRAME):\n lines.append(self._hrule)\n\n # Add rows\n for row in formatted_rows:\n lines.append(self._stringify_row(row, options))\n\n # Add bottom of border\n if options[\"border\"] and options[\"hrules\"] == FRAME:\n lines.append(self._hrule)\n\n return self._unicode(\"\\n\").join(lines)\n",
"def _get_pretty_table(self, indent: int = 0, align: int = ALIGN_CENTER, border: bool = False) -> PrettyTable:\n \"\"\"\n Returns the table format of the scheme, i.e.:\n\n <table name>\n +----------------+----------------\n | <field1> | <field2>...\n +----------------+----------------\n | value1(field1) | value1(field2)\n | value2(field1) | value2(field2)\n | value3(field1) | value3(field2)\n +----------------+----------------\n \"\"\"\n rows = self.rows\n columns = self.columns\n # Add the column color.\n if self._headers_color != Printer.NORMAL and len(rows) > 0 and len(columns) > 0:\n # We need to copy the lists so that we wont insert colors in the original ones.\n rows[0] = rows[0][:]\n columns = columns[:]\n columns[0] = self._headers_color + columns[0]\n # Write the table itself in NORMAL color.\n rows[0][0] = Printer.NORMAL + str(rows[0][0])\n\n table = PrettyTable(columns, border=border, max_width=get_console_width() - indent)\n table.align = self._ALIGN_DICTIONARY[align]\n\n for row in rows:\n table.add_row(row)\n\n # Set the max width according to the columns size dict, or by default size limit when columns were not provided.\n for column, max_width in self._column_size_map.items():\n table.max_width[column] = max_width\n\n return table\n"
] |
class Table(object):
"""
This class represent a table, by using rows.
"""
COLUMN_SIZE_LIMIT = 40
ALIGN_CENTER = 0
ALIGN_LEFT = 1
ALIGN_RIGHT = 2
_ALIGN_DICTIONARY = {ALIGN_CENTER: 'c', ALIGN_LEFT: 'l', ALIGN_RIGHT: 'r'}
def __init__(self, title: str, data: List[Dict[str, str]], column_size_map: Optional[Dict[str, int]] = None,
column_size_limit: int = COLUMN_SIZE_LIMIT, headers_color: str = Printer.NORMAL,
title_align: int = ALIGN_CENTER):
"""
Initializes the table.
:param title: The title of the table.
:param data: A list of dictionaries, each representing a row.
:param column_size_map: A map between each column name and its max size.
:param column_size_limit: Column values larger than that size will be truncated.
:param headers_color: The color of the columns (the headers of the table).
:param title_align: The alignment of the name of the table.
"""
self.title = title
self.data = data
self._column_size_map = defaultdict(lambda: column_size_limit)
if column_size_map:
for column_name, max_size in column_size_map.items():
self._column_size_map[column_name] = max_size
self._headers_color = headers_color
self.title_align = title_align
@property
def rows(self) -> List[List[str]]:
"""
Returns the table rows.
"""
return [list(d.values()) for d in self.data]
@property
def columns(self) -> List[str]:
"""
Returns the table columns.
"""
return list(self.data[0].keys())
def set_column_size_limit(self, column_name: str, size_limit: int):
"""
Sets the size limit of a specific column.
:param column_name: The name of the column to change.
:param size_limit: The max size of the column width.
"""
if self._column_size_map.get(column_name):
self._column_size_map[column_name] = size_limit
else:
raise ValueError(f'There is no column named {column_name}!')
def _get_pretty_table(self, indent: int = 0, align: int = ALIGN_CENTER, border: bool = False) -> PrettyTable:
"""
Returns the table format of the scheme, i.e.:
<table name>
+----------------+----------------
| <field1> | <field2>...
+----------------+----------------
| value1(field1) | value1(field2)
| value2(field1) | value2(field2)
| value3(field1) | value3(field2)
+----------------+----------------
"""
rows = self.rows
columns = self.columns
# Add the column color.
if self._headers_color != Printer.NORMAL and len(rows) > 0 and len(columns) > 0:
# We need to copy the lists so that we wont insert colors in the original ones.
rows[0] = rows[0][:]
columns = columns[:]
columns[0] = self._headers_color + columns[0]
# Write the table itself in NORMAL color.
rows[0][0] = Printer.NORMAL + str(rows[0][0])
table = PrettyTable(columns, border=border, max_width=get_console_width() - indent)
table.align = self._ALIGN_DICTIONARY[align]
for row in rows:
table.add_row(row)
# Set the max width according to the columns size dict, or by default size limit when columns were not provided.
for column, max_width in self._column_size_map.items():
table.max_width[column] = max_width
return table
def get_as_html(self) -> str:
"""
Returns the table object as an HTML string.
:return: HTML representation of the table.
"""
table_string = self._get_pretty_table().get_html_string()
title = ('{:^' + str(len(table_string.splitlines()[0])) + '}').format(self.title)
return f'<center><h1>{title}</h1></center>{table_string}'
def get_as_csv(self, output_file_path: Optional[str] = None) -> str:
"""
Returns the table object as a CSV string.
:param output_file_path: The output file to save the CSV to, or None.
:return: CSV representation of the table.
"""
output = StringIO() if not output_file_path else open(output_file_path, 'w')
try:
csv_writer = csv.writer(output)
csv_writer.writerow(self.columns)
for row in self.rows:
csv_writer.writerow(row)
output.seek(0)
return output.read()
finally:
output.close()
def __iter__(self):
return iter(self.rows)
|
ofir123/py-printer
|
pyprinter/table.py
|
Table.rows
|
python
|
def rows(self) -> List[List[str]]:
return [list(d.values()) for d in self.data]
|
Returns the table rows.
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/table.py#L70-L74
| null |
class Table(object):
"""
This class represent a table, by using rows.
"""
COLUMN_SIZE_LIMIT = 40
ALIGN_CENTER = 0
ALIGN_LEFT = 1
ALIGN_RIGHT = 2
_ALIGN_DICTIONARY = {ALIGN_CENTER: 'c', ALIGN_LEFT: 'l', ALIGN_RIGHT: 'r'}
def __init__(self, title: str, data: List[Dict[str, str]], column_size_map: Optional[Dict[str, int]] = None,
column_size_limit: int = COLUMN_SIZE_LIMIT, headers_color: str = Printer.NORMAL,
title_align: int = ALIGN_CENTER):
"""
Initializes the table.
:param title: The title of the table.
:param data: A list of dictionaries, each representing a row.
:param column_size_map: A map between each column name and its max size.
:param column_size_limit: Column values larger than that size will be truncated.
:param headers_color: The color of the columns (the headers of the table).
:param title_align: The alignment of the name of the table.
"""
self.title = title
self.data = data
self._column_size_map = defaultdict(lambda: column_size_limit)
if column_size_map:
for column_name, max_size in column_size_map.items():
self._column_size_map[column_name] = max_size
self._headers_color = headers_color
self.title_align = title_align
def pretty_print(self, printer: Optional[Printer] = None, align: int = ALIGN_CENTER, border: bool = False):
"""
Pretty prints the table.
:param printer: The printer to print with.
:param align: The alignment of the cells(Table.ALIGN_CENTER/ALIGN_LEFT/ALIGN_RIGHT)
:param border: Whether to add a border around the table
"""
if printer is None:
printer = get_printer()
table_string = self._get_pretty_table(indent=printer.indents_sum, align=align, border=border).get_string()
if table_string != '':
first_line = table_string.splitlines()[0]
first_line_length = len(first_line) - len(re.findall(Printer._ANSI_REGEXP, first_line)) * \
Printer._ANSI_COLOR_LENGTH
if self.title_align == self.ALIGN_CENTER:
title = '{}{}'.format(' ' * (first_line_length // 2 - len(self.title) // 2), self.title)
elif self.title_align == self.ALIGN_LEFT:
title = self.title
else:
title = '{}{}'.format(' ' * (first_line_length - len(self.title)), self.title)
printer.write_line(printer.YELLOW + title)
# We split the table to lines in order to keep the indentation.
printer.write_line(table_string)
@property
@property
def columns(self) -> List[str]:
"""
Returns the table columns.
"""
return list(self.data[0].keys())
def set_column_size_limit(self, column_name: str, size_limit: int):
"""
Sets the size limit of a specific column.
:param column_name: The name of the column to change.
:param size_limit: The max size of the column width.
"""
if self._column_size_map.get(column_name):
self._column_size_map[column_name] = size_limit
else:
raise ValueError(f'There is no column named {column_name}!')
def _get_pretty_table(self, indent: int = 0, align: int = ALIGN_CENTER, border: bool = False) -> PrettyTable:
"""
Returns the table format of the scheme, i.e.:
<table name>
+----------------+----------------
| <field1> | <field2>...
+----------------+----------------
| value1(field1) | value1(field2)
| value2(field1) | value2(field2)
| value3(field1) | value3(field2)
+----------------+----------------
"""
rows = self.rows
columns = self.columns
# Add the column color.
if self._headers_color != Printer.NORMAL and len(rows) > 0 and len(columns) > 0:
# We need to copy the lists so that we wont insert colors in the original ones.
rows[0] = rows[0][:]
columns = columns[:]
columns[0] = self._headers_color + columns[0]
# Write the table itself in NORMAL color.
rows[0][0] = Printer.NORMAL + str(rows[0][0])
table = PrettyTable(columns, border=border, max_width=get_console_width() - indent)
table.align = self._ALIGN_DICTIONARY[align]
for row in rows:
table.add_row(row)
# Set the max width according to the columns size dict, or by default size limit when columns were not provided.
for column, max_width in self._column_size_map.items():
table.max_width[column] = max_width
return table
def get_as_html(self) -> str:
"""
Returns the table object as an HTML string.
:return: HTML representation of the table.
"""
table_string = self._get_pretty_table().get_html_string()
title = ('{:^' + str(len(table_string.splitlines()[0])) + '}').format(self.title)
return f'<center><h1>{title}</h1></center>{table_string}'
def get_as_csv(self, output_file_path: Optional[str] = None) -> str:
"""
Returns the table object as a CSV string.
:param output_file_path: The output file to save the CSV to, or None.
:return: CSV representation of the table.
"""
output = StringIO() if not output_file_path else open(output_file_path, 'w')
try:
csv_writer = csv.writer(output)
csv_writer.writerow(self.columns)
for row in self.rows:
csv_writer.writerow(row)
output.seek(0)
return output.read()
finally:
output.close()
def __iter__(self):
return iter(self.rows)
|
ofir123/py-printer
|
pyprinter/table.py
|
Table.set_column_size_limit
|
python
|
def set_column_size_limit(self, column_name: str, size_limit: int):
if self._column_size_map.get(column_name):
self._column_size_map[column_name] = size_limit
else:
raise ValueError(f'There is no column named {column_name}!')
|
Sets the size limit of a specific column.
:param column_name: The name of the column to change.
:param size_limit: The max size of the column width.
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/table.py#L83-L93
| null |
class Table(object):
"""
This class represent a table, by using rows.
"""
COLUMN_SIZE_LIMIT = 40
ALIGN_CENTER = 0
ALIGN_LEFT = 1
ALIGN_RIGHT = 2
_ALIGN_DICTIONARY = {ALIGN_CENTER: 'c', ALIGN_LEFT: 'l', ALIGN_RIGHT: 'r'}
def __init__(self, title: str, data: List[Dict[str, str]], column_size_map: Optional[Dict[str, int]] = None,
column_size_limit: int = COLUMN_SIZE_LIMIT, headers_color: str = Printer.NORMAL,
title_align: int = ALIGN_CENTER):
"""
Initializes the table.
:param title: The title of the table.
:param data: A list of dictionaries, each representing a row.
:param column_size_map: A map between each column name and its max size.
:param column_size_limit: Column values larger than that size will be truncated.
:param headers_color: The color of the columns (the headers of the table).
:param title_align: The alignment of the name of the table.
"""
self.title = title
self.data = data
self._column_size_map = defaultdict(lambda: column_size_limit)
if column_size_map:
for column_name, max_size in column_size_map.items():
self._column_size_map[column_name] = max_size
self._headers_color = headers_color
self.title_align = title_align
def pretty_print(self, printer: Optional[Printer] = None, align: int = ALIGN_CENTER, border: bool = False):
"""
Pretty prints the table.
:param printer: The printer to print with.
:param align: The alignment of the cells(Table.ALIGN_CENTER/ALIGN_LEFT/ALIGN_RIGHT)
:param border: Whether to add a border around the table
"""
if printer is None:
printer = get_printer()
table_string = self._get_pretty_table(indent=printer.indents_sum, align=align, border=border).get_string()
if table_string != '':
first_line = table_string.splitlines()[0]
first_line_length = len(first_line) - len(re.findall(Printer._ANSI_REGEXP, first_line)) * \
Printer._ANSI_COLOR_LENGTH
if self.title_align == self.ALIGN_CENTER:
title = '{}{}'.format(' ' * (first_line_length // 2 - len(self.title) // 2), self.title)
elif self.title_align == self.ALIGN_LEFT:
title = self.title
else:
title = '{}{}'.format(' ' * (first_line_length - len(self.title)), self.title)
printer.write_line(printer.YELLOW + title)
# We split the table to lines in order to keep the indentation.
printer.write_line(table_string)
@property
def rows(self) -> List[List[str]]:
"""
Returns the table rows.
"""
return [list(d.values()) for d in self.data]
@property
def columns(self) -> List[str]:
"""
Returns the table columns.
"""
return list(self.data[0].keys())
def _get_pretty_table(self, indent: int = 0, align: int = ALIGN_CENTER, border: bool = False) -> PrettyTable:
"""
Returns the table format of the scheme, i.e.:
<table name>
+----------------+----------------
| <field1> | <field2>...
+----------------+----------------
| value1(field1) | value1(field2)
| value2(field1) | value2(field2)
| value3(field1) | value3(field2)
+----------------+----------------
"""
rows = self.rows
columns = self.columns
# Add the column color.
if self._headers_color != Printer.NORMAL and len(rows) > 0 and len(columns) > 0:
# We need to copy the lists so that we wont insert colors in the original ones.
rows[0] = rows[0][:]
columns = columns[:]
columns[0] = self._headers_color + columns[0]
# Write the table itself in NORMAL color.
rows[0][0] = Printer.NORMAL + str(rows[0][0])
table = PrettyTable(columns, border=border, max_width=get_console_width() - indent)
table.align = self._ALIGN_DICTIONARY[align]
for row in rows:
table.add_row(row)
# Set the max width according to the columns size dict, or by default size limit when columns were not provided.
for column, max_width in self._column_size_map.items():
table.max_width[column] = max_width
return table
def get_as_html(self) -> str:
"""
Returns the table object as an HTML string.
:return: HTML representation of the table.
"""
table_string = self._get_pretty_table().get_html_string()
title = ('{:^' + str(len(table_string.splitlines()[0])) + '}').format(self.title)
return f'<center><h1>{title}</h1></center>{table_string}'
def get_as_csv(self, output_file_path: Optional[str] = None) -> str:
"""
Returns the table object as a CSV string.
:param output_file_path: The output file to save the CSV to, or None.
:return: CSV representation of the table.
"""
output = StringIO() if not output_file_path else open(output_file_path, 'w')
try:
csv_writer = csv.writer(output)
csv_writer.writerow(self.columns)
for row in self.rows:
csv_writer.writerow(row)
output.seek(0)
return output.read()
finally:
output.close()
def __iter__(self):
return iter(self.rows)
|
ofir123/py-printer
|
pyprinter/table.py
|
Table._get_pretty_table
|
python
|
def _get_pretty_table(self, indent: int = 0, align: int = ALIGN_CENTER, border: bool = False) -> PrettyTable:
rows = self.rows
columns = self.columns
# Add the column color.
if self._headers_color != Printer.NORMAL and len(rows) > 0 and len(columns) > 0:
# We need to copy the lists so that we wont insert colors in the original ones.
rows[0] = rows[0][:]
columns = columns[:]
columns[0] = self._headers_color + columns[0]
# Write the table itself in NORMAL color.
rows[0][0] = Printer.NORMAL + str(rows[0][0])
table = PrettyTable(columns, border=border, max_width=get_console_width() - indent)
table.align = self._ALIGN_DICTIONARY[align]
for row in rows:
table.add_row(row)
# Set the max width according to the columns size dict, or by default size limit when columns were not provided.
for column, max_width in self._column_size_map.items():
table.max_width[column] = max_width
return table
|
Returns the table format of the scheme, i.e.:
<table name>
+----------------+----------------
| <field1> | <field2>...
+----------------+----------------
| value1(field1) | value1(field2)
| value2(field1) | value2(field2)
| value3(field1) | value3(field2)
+----------------+----------------
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/table.py#L95-L129
|
[
"def get_console_width() -> int:\n \"\"\"\n A small utility function for getting the current console window's width.\n\n :return: The current console window's width.\n \"\"\"\n # Assigning the value once, as frequent call to this function\n # causes a major slow down(ImportErrors + isinstance).\n global _IN_QT\n if _IN_QT is None:\n _IN_QT = _in_qtconsole()\n\n try:\n if _IN_QT:\n # QTConsole determines and handles the max line length by itself.\n width = sys.maxsize\n else:\n width = _get_windows_console_width() if os.name == 'nt' else _get_linux_console_width()\n if width <= 0:\n return 80\n return width\n except Exception:\n # Default value.\n return 80\n",
"def add_row(self, row):\n\n \"\"\"Add a row to the table\n\n Arguments:\n\n row - row of data, should be a list with as many elements as the table\n has fields\"\"\"\n\n if self._field_names and len(row) != len(self._field_names):\n raise Exception(\"Row has incorrect number of values, (actual) %d!=%d (expected)\" %(len(row),len(self._field_names)))\n if not self._field_names:\n self.field_names = [(\"Field %d\" % (n+1)) for n in range(0,len(row))]\n self._rows.append(list(row))\n"
] |
class Table(object):
"""
This class represent a table, by using rows.
"""
COLUMN_SIZE_LIMIT = 40
ALIGN_CENTER = 0
ALIGN_LEFT = 1
ALIGN_RIGHT = 2
_ALIGN_DICTIONARY = {ALIGN_CENTER: 'c', ALIGN_LEFT: 'l', ALIGN_RIGHT: 'r'}
def __init__(self, title: str, data: List[Dict[str, str]], column_size_map: Optional[Dict[str, int]] = None,
column_size_limit: int = COLUMN_SIZE_LIMIT, headers_color: str = Printer.NORMAL,
title_align: int = ALIGN_CENTER):
"""
Initializes the table.
:param title: The title of the table.
:param data: A list of dictionaries, each representing a row.
:param column_size_map: A map between each column name and its max size.
:param column_size_limit: Column values larger than that size will be truncated.
:param headers_color: The color of the columns (the headers of the table).
:param title_align: The alignment of the name of the table.
"""
self.title = title
self.data = data
self._column_size_map = defaultdict(lambda: column_size_limit)
if column_size_map:
for column_name, max_size in column_size_map.items():
self._column_size_map[column_name] = max_size
self._headers_color = headers_color
self.title_align = title_align
def pretty_print(self, printer: Optional[Printer] = None, align: int = ALIGN_CENTER, border: bool = False):
"""
Pretty prints the table.
:param printer: The printer to print with.
:param align: The alignment of the cells(Table.ALIGN_CENTER/ALIGN_LEFT/ALIGN_RIGHT)
:param border: Whether to add a border around the table
"""
if printer is None:
printer = get_printer()
table_string = self._get_pretty_table(indent=printer.indents_sum, align=align, border=border).get_string()
if table_string != '':
first_line = table_string.splitlines()[0]
first_line_length = len(first_line) - len(re.findall(Printer._ANSI_REGEXP, first_line)) * \
Printer._ANSI_COLOR_LENGTH
if self.title_align == self.ALIGN_CENTER:
title = '{}{}'.format(' ' * (first_line_length // 2 - len(self.title) // 2), self.title)
elif self.title_align == self.ALIGN_LEFT:
title = self.title
else:
title = '{}{}'.format(' ' * (first_line_length - len(self.title)), self.title)
printer.write_line(printer.YELLOW + title)
# We split the table to lines in order to keep the indentation.
printer.write_line(table_string)
@property
def rows(self) -> List[List[str]]:
"""
Returns the table rows.
"""
return [list(d.values()) for d in self.data]
@property
def columns(self) -> List[str]:
"""
Returns the table columns.
"""
return list(self.data[0].keys())
def set_column_size_limit(self, column_name: str, size_limit: int):
"""
Sets the size limit of a specific column.
:param column_name: The name of the column to change.
:param size_limit: The max size of the column width.
"""
if self._column_size_map.get(column_name):
self._column_size_map[column_name] = size_limit
else:
raise ValueError(f'There is no column named {column_name}!')
def get_as_html(self) -> str:
"""
Returns the table object as an HTML string.
:return: HTML representation of the table.
"""
table_string = self._get_pretty_table().get_html_string()
title = ('{:^' + str(len(table_string.splitlines()[0])) + '}').format(self.title)
return f'<center><h1>{title}</h1></center>{table_string}'
def get_as_csv(self, output_file_path: Optional[str] = None) -> str:
    """
    Returns the table object as a CSV string.

    :param output_file_path: The output file to save the CSV to, or None.
    :return: CSV representation of the table.
    """
    # 'w+' (not 'w') so the final read-back works - a write-only handle
    # made output.read() raise io.UnsupportedOperation. newline='' is
    # what the csv module requires for regular file objects.
    output = StringIO() if not output_file_path else open(output_file_path, 'w+', newline='')
    try:
        csv_writer = csv.writer(output)
        csv_writer.writerow(self.columns)
        for row in self.rows:
            csv_writer.writerow(row)
        output.seek(0)
        return output.read()
    finally:
        output.close()
def __iter__(self):
    """Iterate over the table's rows."""
    row_values = self.rows
    return iter(row_values)
|
ofir123/py-printer
|
pyprinter/table.py
|
Table.get_as_html
|
python
|
def get_as_html(self) -> str:
table_string = self._get_pretty_table().get_html_string()
title = ('{:^' + str(len(table_string.splitlines()[0])) + '}').format(self.title)
return f'<center><h1>{title}</h1></center>{table_string}'
|
Returns the table object as an HTML string.
:return: HTML representation of the table.
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/table.py#L131-L139
|
[
"def get_html_string(self, **kwargs):\n\n \"\"\"Return string representation of HTML formatted version of table in current state.\n\n Arguments:\n\n start - index of first data row to include in output\n end - index of last data row to include in output PLUS ONE (list slice style)\n fields - names of fields (columns) to include\n header - print a header showing field names (True or False)\n border - print a border around the table (True or False)\n hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE\n vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE\n int_format - controls formatting of integer data\n float_format - controls formatting of floating point data\n padding_width - number of spaces on either side of column data (only used if left and right paddings are None)\n left_padding_width - number of spaces on left hand side of column data\n right_padding_width - number of spaces on right hand side of column data\n sortby - name of field to sort rows by\n sort_key - sorting key function, applied to data points before sorting\n attributes - dictionary of name/value pairs to include as HTML attributes in the <table> tag\n xhtml - print <br/> tags if True, <br> tags if false\"\"\"\n\n options = self._get_options(kwargs)\n\n if options[\"format\"]:\n string = self._get_formatted_html_string(options)\n else:\n string = self._get_simple_html_string(options)\n\n return string\n",
"def _get_pretty_table(self, indent: int = 0, align: int = ALIGN_CENTER, border: bool = False) -> PrettyTable:\n \"\"\"\n Returns the table format of the scheme, i.e.:\n\n <table name>\n +----------------+----------------\n | <field1> | <field2>...\n +----------------+----------------\n | value1(field1) | value1(field2)\n | value2(field1) | value2(field2)\n | value3(field1) | value3(field2)\n +----------------+----------------\n \"\"\"\n rows = self.rows\n columns = self.columns\n # Add the column color.\n if self._headers_color != Printer.NORMAL and len(rows) > 0 and len(columns) > 0:\n # We need to copy the lists so that we wont insert colors in the original ones.\n rows[0] = rows[0][:]\n columns = columns[:]\n columns[0] = self._headers_color + columns[0]\n # Write the table itself in NORMAL color.\n rows[0][0] = Printer.NORMAL + str(rows[0][0])\n\n table = PrettyTable(columns, border=border, max_width=get_console_width() - indent)\n table.align = self._ALIGN_DICTIONARY[align]\n\n for row in rows:\n table.add_row(row)\n\n # Set the max width according to the columns size dict, or by default size limit when columns were not provided.\n for column, max_width in self._column_size_map.items():\n table.max_width[column] = max_width\n\n return table\n"
] |
class Table(object):
    """
    This class represent a table, by using rows.
    """

    COLUMN_SIZE_LIMIT = 40
    ALIGN_CENTER = 0
    ALIGN_LEFT = 1
    ALIGN_RIGHT = 2
    # Maps the ALIGN_* constants to prettytable's alignment codes.
    _ALIGN_DICTIONARY = {ALIGN_CENTER: 'c', ALIGN_LEFT: 'l', ALIGN_RIGHT: 'r'}

    def __init__(self, title: str, data: List[Dict[str, str]], column_size_map: Optional[Dict[str, int]] = None,
                 column_size_limit: int = COLUMN_SIZE_LIMIT, headers_color: str = Printer.NORMAL,
                 title_align: int = ALIGN_CENTER):
        """
        Initializes the table.

        :param title: The title of the table.
        :param data: A list of dictionaries, each representing a row.
        :param column_size_map: A map between each column name and its max size.
        :param column_size_limit: Column values larger than that size will be truncated.
        :param headers_color: The color of the columns (the headers of the table).
        :param title_align: The alignment of the name of the table.
        """
        self.title = title
        self.data = data
        # Columns without an explicit entry fall back to column_size_limit.
        self._column_size_map = defaultdict(lambda: column_size_limit)
        if column_size_map:
            for column_name, max_size in column_size_map.items():
                self._column_size_map[column_name] = max_size
        self._headers_color = headers_color
        self.title_align = title_align

    def pretty_print(self, printer: Optional[Printer] = None, align: int = ALIGN_CENTER, border: bool = False):
        """
        Pretty prints the table.

        :param printer: The printer to print with.
        :param align: The alignment of the cells(Table.ALIGN_CENTER/ALIGN_LEFT/ALIGN_RIGHT)
        :param border: Whether to add a border around the table
        """
        if printer is None:
            printer = get_printer()
        table_string = self._get_pretty_table(indent=printer.indents_sum, align=align, border=border).get_string()
        if table_string != '':
            first_line = table_string.splitlines()[0]
            # Subtract invisible ANSI color escapes so the title aligns
            # against the visible table width.
            first_line_length = len(first_line) - len(re.findall(Printer._ANSI_REGEXP, first_line)) * \
                Printer._ANSI_COLOR_LENGTH
            if self.title_align == self.ALIGN_CENTER:
                title = '{}{}'.format(' ' * (first_line_length // 2 - len(self.title) // 2), self.title)
            elif self.title_align == self.ALIGN_LEFT:
                title = self.title
            else:
                title = '{}{}'.format(' ' * (first_line_length - len(self.title)), self.title)
            printer.write_line(printer.YELLOW + title)
            # We split the table to lines in order to keep the indentation.
            printer.write_line(table_string)

    @property
    def rows(self) -> List[List[str]]:
        """
        Returns the table rows.
        """
        return [list(d.values()) for d in self.data]

    @property
    def columns(self) -> List[str]:
        """
        Returns the table columns.
        """
        # An empty table simply has no columns (the original raised
        # IndexError on empty data).
        if not self.data:
            return []
        return list(self.data[0].keys())

    def set_column_size_limit(self, column_name: str, size_limit: int):
        """
        Sets the size limit of a specific column.

        :param column_name: The name of the column to change.
        :param size_limit: The max size of the column width.
        :raises ValueError: If the column doesn't exist in the table.
        """
        # Validate against the actual columns: the size map is a defaultdict,
        # so the original `.get()` check was None for every column that
        # wasn't pre-seeded in __init__ (and a stored limit of 0 is falsy),
        # wrongly rejecting existing columns.
        if column_name in self.columns:
            self._column_size_map[column_name] = size_limit
        else:
            raise ValueError(f'There is no column named {column_name}!')

    def _get_pretty_table(self, indent: int = 0, align: int = ALIGN_CENTER, border: bool = False) -> PrettyTable:
        """
        Returns the table format of the scheme, i.e.:

        <table name>
        +----------------+----------------
        | <field1>       | <field2>...
        +----------------+----------------
        | value1(field1) | value1(field2)
        | value2(field1) | value2(field2)
        | value3(field1) | value3(field2)
        +----------------+----------------
        """
        rows = self.rows
        columns = self.columns
        # Add the column color.
        if self._headers_color != Printer.NORMAL and len(rows) > 0 and len(columns) > 0:
            # We need to copy the lists so that we wont insert colors in the original ones.
            rows[0] = rows[0][:]
            columns = columns[:]
            columns[0] = self._headers_color + columns[0]
            # Write the table itself in NORMAL color.
            rows[0][0] = Printer.NORMAL + str(rows[0][0])

        table = PrettyTable(columns, border=border, max_width=get_console_width() - indent)
        table.align = self._ALIGN_DICTIONARY[align]

        for row in rows:
            table.add_row(row)

        # Set the max width according to the columns size dict, or by default size limit when columns were not provided.
        for column, max_width in self._column_size_map.items():
            table.max_width[column] = max_width

        return table

    def get_as_html(self) -> str:
        """
        Returns the table object as an HTML string.

        :return: HTML representation of the table.
        """
        table_string = self._get_pretty_table().get_html_string()
        title = ('{:^' + str(len(table_string.splitlines()[0])) + '}').format(self.title)
        return f'<center><h1>{title}</h1></center>{table_string}'

    def get_as_csv(self, output_file_path: Optional[str] = None) -> str:
        """
        Returns the table object as a CSV string.

        :param output_file_path: The output file to save the CSV to, or None.
        :return: CSV representation of the table.
        """
        # 'w+' (not 'w') so the read-back below works - a write-only handle
        # made output.read() raise io.UnsupportedOperation; newline='' is
        # what the csv module requires for regular file objects.
        output = StringIO() if not output_file_path else open(output_file_path, 'w+', newline='')
        try:
            csv_writer = csv.writer(output)
            csv_writer.writerow(self.columns)
            for row in self.rows:
                csv_writer.writerow(row)
            output.seek(0)
            return output.read()
        finally:
            output.close()

    def __iter__(self):
        return iter(self.rows)
|
ofir123/py-printer
|
pyprinter/table.py
|
Table.get_as_csv
|
python
|
def get_as_csv(self, output_file_path: Optional[str] = None) -> str:
output = StringIO() if not output_file_path else open(output_file_path, 'w')
try:
csv_writer = csv.writer(output)
csv_writer.writerow(self.columns)
for row in self.rows:
csv_writer.writerow(row)
output.seek(0)
return output.read()
finally:
output.close()
|
Returns the table object as a CSV string.
:param output_file_path: The output file to save the CSV to, or None.
:return: CSV representation of the table.
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/table.py#L141-L158
| null |
class Table(object):
    """
    This class represent a table, by using rows.
    """

    COLUMN_SIZE_LIMIT = 40
    ALIGN_CENTER = 0
    ALIGN_LEFT = 1
    ALIGN_RIGHT = 2
    # Maps the ALIGN_* constants to prettytable's alignment codes.
    _ALIGN_DICTIONARY = {ALIGN_CENTER: 'c', ALIGN_LEFT: 'l', ALIGN_RIGHT: 'r'}

    def __init__(self, title: str, data: List[Dict[str, str]], column_size_map: Optional[Dict[str, int]] = None,
                 column_size_limit: int = COLUMN_SIZE_LIMIT, headers_color: str = Printer.NORMAL,
                 title_align: int = ALIGN_CENTER):
        """
        Initializes the table.

        :param title: The title of the table.
        :param data: A list of dictionaries, each representing a row.
        :param column_size_map: A map between each column name and its max size.
        :param column_size_limit: Column values larger than that size will be truncated.
        :param headers_color: The color of the columns (the headers of the table).
        :param title_align: The alignment of the name of the table.
        """
        self.title = title
        self.data = data
        # Columns without an explicit entry fall back to column_size_limit.
        self._column_size_map = defaultdict(lambda: column_size_limit)
        if column_size_map:
            for column_name, max_size in column_size_map.items():
                self._column_size_map[column_name] = max_size
        self._headers_color = headers_color
        self.title_align = title_align

    def pretty_print(self, printer: Optional[Printer] = None, align: int = ALIGN_CENTER, border: bool = False):
        """
        Pretty prints the table.

        :param printer: The printer to print with.
        :param align: The alignment of the cells(Table.ALIGN_CENTER/ALIGN_LEFT/ALIGN_RIGHT)
        :param border: Whether to add a border around the table
        """
        if printer is None:
            printer = get_printer()
        table_string = self._get_pretty_table(indent=printer.indents_sum, align=align, border=border).get_string()
        if table_string != '':
            first_line = table_string.splitlines()[0]
            # Subtract invisible ANSI color escapes so the title aligns
            # against the visible table width.
            first_line_length = len(first_line) - len(re.findall(Printer._ANSI_REGEXP, first_line)) * \
                Printer._ANSI_COLOR_LENGTH
            if self.title_align == self.ALIGN_CENTER:
                title = '{}{}'.format(' ' * (first_line_length // 2 - len(self.title) // 2), self.title)
            elif self.title_align == self.ALIGN_LEFT:
                title = self.title
            else:
                title = '{}{}'.format(' ' * (first_line_length - len(self.title)), self.title)
            printer.write_line(printer.YELLOW + title)
            # We split the table to lines in order to keep the indentation.
            printer.write_line(table_string)

    @property
    def rows(self) -> List[List[str]]:
        """
        Returns the table rows.
        """
        return [list(d.values()) for d in self.data]

    @property
    def columns(self) -> List[str]:
        """
        Returns the table columns.
        """
        # An empty table simply has no columns (the original raised
        # IndexError on empty data).
        if not self.data:
            return []
        return list(self.data[0].keys())

    def set_column_size_limit(self, column_name: str, size_limit: int):
        """
        Sets the size limit of a specific column.

        :param column_name: The name of the column to change.
        :param size_limit: The max size of the column width.
        :raises ValueError: If the column doesn't exist in the table.
        """
        # Validate against the actual columns: the size map is a defaultdict,
        # so the original `.get()` check was None for every column that
        # wasn't pre-seeded in __init__ (and a stored limit of 0 is falsy),
        # wrongly rejecting existing columns.
        if column_name in self.columns:
            self._column_size_map[column_name] = size_limit
        else:
            raise ValueError(f'There is no column named {column_name}!')

    def _get_pretty_table(self, indent: int = 0, align: int = ALIGN_CENTER, border: bool = False) -> PrettyTable:
        """
        Returns the table format of the scheme, i.e.:

        <table name>
        +----------------+----------------
        | <field1>       | <field2>...
        +----------------+----------------
        | value1(field1) | value1(field2)
        | value2(field1) | value2(field2)
        | value3(field1) | value3(field2)
        +----------------+----------------
        """
        rows = self.rows
        columns = self.columns
        # Add the column color.
        if self._headers_color != Printer.NORMAL and len(rows) > 0 and len(columns) > 0:
            # We need to copy the lists so that we wont insert colors in the original ones.
            rows[0] = rows[0][:]
            columns = columns[:]
            columns[0] = self._headers_color + columns[0]
            # Write the table itself in NORMAL color.
            rows[0][0] = Printer.NORMAL + str(rows[0][0])

        table = PrettyTable(columns, border=border, max_width=get_console_width() - indent)
        table.align = self._ALIGN_DICTIONARY[align]

        for row in rows:
            table.add_row(row)

        # Set the max width according to the columns size dict, or by default size limit when columns were not provided.
        for column, max_width in self._column_size_map.items():
            table.max_width[column] = max_width

        return table

    def get_as_html(self) -> str:
        """
        Returns the table object as an HTML string.

        :return: HTML representation of the table.
        """
        table_string = self._get_pretty_table().get_html_string()
        title = ('{:^' + str(len(table_string.splitlines()[0])) + '}').format(self.title)
        return f'<center><h1>{title}</h1></center>{table_string}'

    def __iter__(self):
        return iter(self.rows)
|
ofir123/py-printer
|
pyprinter/file_size.py
|
FileSize._unit_info
|
python
|
def _unit_info(self) -> Tuple[str, int]:
abs_bytes = abs(self.size)
if abs_bytes < 1024:
unit = 'B'
unit_divider = 1
elif abs_bytes < (1024 ** 2):
unit = 'KB'
unit_divider = 1024
elif abs_bytes < (1024 ** 3):
unit = 'MB'
unit_divider = (1024 ** 2)
elif abs_bytes < (1024 ** 4):
unit = 'GB'
unit_divider = (1024 ** 3)
else:
unit = 'TB'
unit_divider = (1024 ** 4)
return unit, unit_divider
|
Returns both the best unit to measure the size, and its power.
:return: A tuple containing the unit and its power.
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/file_size.py#L54-L77
| null |
class FileSize:
    """
    Represents a file size measured in bytes.
    """

    # Suffix -> byte multiplier; 'b' must stay last so that 'kb'/'mb'/'gb'/'tb'
    # are matched before the bare 'b' suffix when parsing strings.
    MULTIPLIERS = [('kb', 1024), ('mb', 1024 ** 2), ('gb', 1024 ** 3), ('tb', 1024 ** 4), ('b', 1)]
    SIZE_COLORS = {
        'B': Printer.YELLOW,
        'KB': Printer.CYAN,
        'MB': Printer.GREEN,
        'GB': Printer.RED,
        'TB': Printer.DARK_RED
    }

    def __init__(self, size: Union[int, float, str, bytes, _FileSizeType]):
        """
        Initializes a new FileSize from an integer (or a string) of the bytes amount.
        """
        # Handle cases where size is another FileSize instance (Copy C'tor).
        if isinstance(size, FileSize):
            self.size = size.size
        else:
            chosen_multiplier = 1
            # Handle cases where size is a string like '1,600 KB'.
            if isinstance(size, bytes):
                size = size.decode('UTF-8')
            if isinstance(size, str):
                size = size.replace(',', '').lower()
                for ms, mi in self.MULTIPLIERS:
                    if size.endswith(ms):
                        chosen_multiplier = mi
                        size = size[:-len(ms)]
                        break
            self.size = int(float(size) * chosen_multiplier)

    def _unit_info(self):
        """
        Returns both the best unit to measure the size, and its power.

        :return: A tuple containing the unit and its power.
        """
        # Restored helper: __str__ and pretty_print both call it, but it was
        # missing from this class, so both raised AttributeError.
        abs_bytes = abs(self.size)
        if abs_bytes < 1024:
            unit = 'B'
            unit_divider = 1
        elif abs_bytes < (1024 ** 2):
            unit = 'KB'
            unit_divider = 1024
        elif abs_bytes < (1024 ** 3):
            unit = 'MB'
            unit_divider = (1024 ** 2)
        elif abs_bytes < (1024 ** 4):
            unit = 'GB'
            unit_divider = (1024 ** 3)
        else:
            unit = 'TB'
            unit_divider = (1024 ** 4)

        return unit, unit_divider

    def __str__(self) -> str:
        """Return the size formatted with its best-fitting unit, e.g. '1.5 KB'."""
        unit, unit_divider = self._unit_info()
        # NOTE: with float division the *100/.../100 round trip is effectively
        # a plain division; the one visible decimal comes from the .1f format.
        size_in_unit = (self.size * 100) / unit_divider / 100
        return f'{size_in_unit:.1f} {unit}'

    def __repr__(self) -> str:
        return f'<FileSize - {self}>'

    @property
    def bytes(self) -> int:
        return self.size

    @property
    def kilo_bytes(self) -> int:
        return self.bytes // 1024

    @property
    def mega_bytes(self) -> int:
        return self.kilo_bytes // 1024

    @staticmethod
    def get_file_size_string(size_bytes: int) -> str:
        return str(FileSize(size_bytes))

    def __add__(self, file_size: Union[int, float, _FileSizeType]) -> _FileSizeType:
        """
        Handles adding numbers or file sizes to the file size.

        :param file_size: The size to add to the current file size.
        :return: A new file size with the combined number of the file sizes.
        """
        if isinstance(file_size, FileSize):
            return FileSize(self.size + file_size.size)
        if isinstance(file_size, (int, float)):
            return FileSize(self.size + file_size)
        raise TypeError(f'Can\'t add a {type(file_size).__name__} to a file size')

    def __sub__(self, file_size: Union[int, float, _FileSizeType]) -> _FileSizeType:
        """
        Handles subtracting numbers or file sizes from the file size.

        :param file_size: The size to subtract from the current file size.
        :return: A new file size with the difference between the file sizes.
        """
        if isinstance(file_size, FileSize):
            return FileSize(self.size - file_size.size)
        if isinstance(file_size, (int, float)):
            return FileSize(self.size - file_size)
        raise TypeError(f'Can\'t subtract a {type(file_size).__name__} from a file size')

    def __int__(self) -> int:
        return self.size

    def __float__(self) -> float:
        return float(self.size)

    def __mul__(self, amount: Union[int, float]) -> _FileSizeType:
        """
        Multiplies the file size by the specified amount.

        :param amount: The amount by which to multiply.
        :return: A new file size with the multiplied value of this file size.
        """
        if isinstance(amount, (int, float)):
            return FileSize(self.size * amount)
        raise TypeError(f'Can\'t multiply a file size by a {type(amount).__name__} (only by a number)')

    def __truediv__(self, amount: Union[int, float]) -> _FileSizeType:
        """
        Divides the file size by the specified amount.

        :param amount: The amount by which to divide.
        :return: A new file size with the divided value of this file size.
        """
        if isinstance(amount, (int, float)):
            return FileSize(self.size / amount)
        raise TypeError(f'Can\'t divide a file size by a {type(amount).__name__} (only by a number)')

    def __floordiv__(self, amount: Union[int, float]) -> _FileSizeType:
        """
        Divides the file size by the specified amount and floors the result.

        :param amount: The amount by which to divide.
        :return: A new file size with the divided value of this file size.
        """
        if isinstance(amount, (int, float)):
            return FileSize(self.size // amount)
        raise TypeError(f'Can\'t divide a file size by a {type(amount).__name__} (only by a number)')

    def __lt__(self, other) -> bool:
        """
        Returns whether this size is less than the other size.

        :param FileSize other: The other size.
        """
        return int(self) < int(FileSize(other))

    def __le__(self, other) -> bool:
        """
        Returns whether this size is less than or equal to the other size.

        :param FileSize other: The other size.
        """
        return int(self) <= int(FileSize(other))

    def __eq__(self, other) -> bool:
        """
        Returns whether this size is equal to the other size.

        :param FileSize other: The other size.
        """
        return other is not None and isinstance(other, (int, float, FileSize)) and int(self) == int(FileSize(other))

    def __ne__(self, other) -> bool:
        """
        Returns whether this size is not equal to the other size.

        :param FileSize other: The other size.
        """
        return not self.__eq__(other)

    def __hash__(self) -> int:
        """
        Returns a hash consistent with __eq__.

        Defining __eq__ implicitly sets __hash__ to None, which made
        instances unusable in sets and as dict keys; hashing the byte count
        keeps hash(FileSize(n)) == hash(n), matching equality with ints.
        """
        return hash(self.size)

    def __gt__(self, other) -> bool:
        """
        Returns whether this size is greater than the other size.

        :param FileSize other: The other size.
        """
        return int(self) > int(FileSize(other))

    def __ge__(self, other) -> bool:
        """
        Returns whether this size is greater than or equal to the other size.

        :param FileSize other: The other size.
        """
        return int(self) >= int(FileSize(other))

    def pretty_print(self, printer: Optional[Printer] = None, min_width: int = 1, min_unit_width: int = 1):
        """
        Prints the file size (and its unit), reserving places for longer sizes and units.

        :param printer: The printer to print with (defaults to the global printer).
        :param min_width: Minimum total width of the printed '<size> <unit>' text;
                          shorter output is left-padded with spaces.
        :param min_unit_width: Minimum width of the unit suffix; shorter units
                               (e.g. 'B') are left-padded so columns line up.
        """
        unit, unit_divider = self._unit_info()
        unit_color = self.SIZE_COLORS[unit]
        size_in_unit = (self.size * 100) / unit_divider / 100
        # Add spaces to align the units.
        unit = '{}{}'.format(' ' * (min_unit_width - len(unit)), unit)
        size_string = f'{size_in_unit:.1f}'
        total_len = len(size_string) + 1 + len(unit)
        if printer is None:
            printer = get_printer()
        spaces_count = min_width - total_len
        if spaces_count > 0:
            printer.write(' ' * spaces_count)
        printer.write(f'{size_string} {unit_color}{unit}')
|
ofir123/py-printer
|
pyprinter/file_size.py
|
FileSize.pretty_print
|
python
|
def pretty_print(self, printer: Optional[Printer] = None, min_width: int = 1, min_unit_width: int = 1):
unit, unit_divider = self._unit_info()
unit_color = self.SIZE_COLORS[unit]
# Multiply and then divide by 100 in order to have only two decimal places.
size_in_unit = (self.size * 100) / unit_divider / 100
# Add spaces to align the units.
unit = '{}{}'.format(' ' * (min_unit_width - len(unit)), unit)
size_string = f'{size_in_unit:.1f}'
total_len = len(size_string) + 1 + len(unit)
if printer is None:
printer = get_printer()
spaces_count = min_width - total_len
if spaces_count > 0:
printer.write(' ' * spaces_count)
printer.write(f'{size_string} {unit_color}{unit}')
|
Prints the file size (and it's unit), reserving places for longer sizes and units.
For example:
min_unit_width = 1:
793 B
100 KB
min_unit_width = 2:
793 B
100 KB
min_unit_width = 3:
793 B
100 KB
|
train
|
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/file_size.py#L208-L235
|
[
"def get_printer(colors: bool = True, width_limit: bool = True, disabled: bool = False) -> Printer:\n \"\"\"\n Returns an already initialized instance of the printer.\n\n :param colors: If False, no colors will be printed.\n :param width_limit: If True, printing width will be limited by console width.\n :param disabled: If True, nothing will be printed.\n \"\"\"\n global _printer\n global _colors\n # Make sure we can print colors if needed.\n colors = colors and _colors\n # If the printer was never defined before, or the settings have changed.\n if not _printer or (colors != _printer._colors) or (width_limit != _printer._width_limit):\n _printer = Printer(DefaultWriter(disabled=disabled), colors=colors, width_limit=width_limit)\n return _printer\n",
"def _unit_info(self) -> Tuple[str, int]:\n \"\"\"\n Returns both the best unit to measure the size, and its power.\n\n :return: A tuple containing the unit and its power.\n \"\"\"\n abs_bytes = abs(self.size)\n if abs_bytes < 1024:\n unit = 'B'\n unit_divider = 1\n elif abs_bytes < (1024 ** 2):\n unit = 'KB'\n unit_divider = 1024\n elif abs_bytes < (1024 ** 3):\n unit = 'MB'\n unit_divider = (1024 ** 2)\n elif abs_bytes < (1024 ** 4):\n unit = 'GB'\n unit_divider = (1024 ** 3)\n else:\n unit = 'TB'\n unit_divider = (1024 ** 4)\n\n return unit, unit_divider\n"
] |
class FileSize:
    """
    Represents a file size measured in bytes.
    """

    # Suffix -> byte multiplier; 'b' must stay last so that 'kb'/'mb'/'gb'/'tb'
    # are matched before the bare 'b' suffix when parsing strings.
    MULTIPLIERS = [('kb', 1024), ('mb', 1024 ** 2), ('gb', 1024 ** 3), ('tb', 1024 ** 4), ('b', 1)]
    SIZE_COLORS = {
        'B': Printer.YELLOW,
        'KB': Printer.CYAN,
        'MB': Printer.GREEN,
        'GB': Printer.RED,
        'TB': Printer.DARK_RED
    }

    def __init__(self, size: Union[int, float, str, bytes, _FileSizeType]):
        """
        Initializes a new FileSize from an integer (or a string) of the bytes amount.
        """
        # Handle cases where size is another FileSize instance (Copy C'tor).
        if isinstance(size, FileSize):
            self.size = size.size
        else:
            chosen_multiplier = 1
            # Handle cases where size is a string like '1,600 KB'.
            if isinstance(size, bytes):
                size = size.decode('UTF-8')
            if isinstance(size, str):
                size = size.replace(',', '').lower()
                for ms, mi in self.MULTIPLIERS:
                    if size.endswith(ms):
                        chosen_multiplier = mi
                        size = size[:-len(ms)]
                        break
            self.size = int(float(size) * chosen_multiplier)

    def __str__(self) -> str:
        """Return the size formatted with its best-fitting unit, e.g. '1.5 KB'."""
        unit, unit_divider = self._unit_info()
        # NOTE: with float division the *100/.../100 round trip is effectively
        # a plain division; the one visible decimal comes from the .1f format.
        size_in_unit = (self.size * 100) / unit_divider / 100
        return f'{size_in_unit:.1f} {unit}'

    def __repr__(self) -> str:
        return f'<FileSize - {self}>'

    def _unit_info(self) -> Tuple[str, int]:
        """
        Returns both the best unit to measure the size, and its power.

        :return: A tuple containing the unit and its power.
        """
        abs_bytes = abs(self.size)
        if abs_bytes < 1024:
            unit = 'B'
            unit_divider = 1
        elif abs_bytes < (1024 ** 2):
            unit = 'KB'
            unit_divider = 1024
        elif abs_bytes < (1024 ** 3):
            unit = 'MB'
            unit_divider = (1024 ** 2)
        elif abs_bytes < (1024 ** 4):
            unit = 'GB'
            unit_divider = (1024 ** 3)
        else:
            unit = 'TB'
            unit_divider = (1024 ** 4)

        return unit, unit_divider

    @property
    def bytes(self) -> int:
        return self.size

    @property
    def kilo_bytes(self) -> int:
        return self.bytes // 1024

    @property
    def mega_bytes(self) -> int:
        return self.kilo_bytes // 1024

    @staticmethod
    def get_file_size_string(size_bytes: int) -> str:
        return str(FileSize(size_bytes))

    def __add__(self, file_size: Union[int, float, _FileSizeType]) -> _FileSizeType:
        """
        Handles adding numbers or file sizes to the file size.

        :param file_size: The size to add to the current file size.
        :return: A new file size with the combined number of the file sizes.
        """
        if isinstance(file_size, FileSize):
            return FileSize(self.size + file_size.size)
        if isinstance(file_size, (int, float)):
            return FileSize(self.size + file_size)
        raise TypeError(f'Can\'t add a {type(file_size).__name__} to a file size')

    def __sub__(self, file_size: Union[int, float, _FileSizeType]) -> _FileSizeType:
        """
        Handles subtracting numbers or file sizes from the file size.

        :param file_size: The size to subtract from the current file size.
        :return: A new file size with the difference between the file sizes.
        """
        if isinstance(file_size, FileSize):
            return FileSize(self.size - file_size.size)
        if isinstance(file_size, (int, float)):
            return FileSize(self.size - file_size)
        raise TypeError(f'Can\'t subtract a {type(file_size).__name__} from a file size')

    def __int__(self) -> int:
        return self.size

    def __float__(self) -> float:
        return float(self.size)

    def __mul__(self, amount: Union[int, float]) -> _FileSizeType:
        """
        Multiplies the file size by the specified amount.

        :param amount: The amount by which to multiply.
        :return: A new file size with the multiplied value of this file size.
        """
        if isinstance(amount, (int, float)):
            return FileSize(self.size * amount)
        raise TypeError(f'Can\'t multiply a file size by a {type(amount).__name__} (only by a number)')

    def __truediv__(self, amount: Union[int, float]) -> _FileSizeType:
        """
        Divides the file size by the specified amount.

        :param amount: The amount by which to divide.
        :return: A new file size with the divided value of this file size.
        """
        if isinstance(amount, (int, float)):
            return FileSize(self.size / amount)
        raise TypeError(f'Can\'t divide a file size by a {type(amount).__name__} (only by a number)')

    def __floordiv__(self, amount: Union[int, float]) -> _FileSizeType:
        """
        Divides the file size by the specified amount and floors the result.

        :param amount: The amount by which to divide.
        :return: A new file size with the divided value of this file size.
        """
        if isinstance(amount, (int, float)):
            return FileSize(self.size // amount)
        raise TypeError(f'Can\'t divide a file size by a {type(amount).__name__} (only by a number)')

    def __lt__(self, other) -> bool:
        """
        Returns whether this size is less than the other size.

        :param FileSize other: The other size.
        """
        return int(self) < int(FileSize(other))

    def __le__(self, other) -> bool:
        """
        Returns whether this size is less than or equal to the other size.

        :param FileSize other: The other size.
        """
        return int(self) <= int(FileSize(other))

    def __eq__(self, other) -> bool:
        """
        Returns whether this size is equal to the other size.

        :param FileSize other: The other size.
        """
        return other is not None and isinstance(other, (int, float, FileSize)) and int(self) == int(FileSize(other))

    def __ne__(self, other) -> bool:
        """
        Returns whether this size is not equal to the other size.

        :param FileSize other: The other size.
        """
        return not self.__eq__(other)

    def __hash__(self) -> int:
        """
        Returns a hash consistent with __eq__.

        Defining __eq__ implicitly sets __hash__ to None, which made
        instances unusable in sets and as dict keys; hashing the byte count
        keeps hash(FileSize(n)) == hash(n), matching equality with ints.
        """
        return hash(self.size)

    def __gt__(self, other) -> bool:
        """
        Returns whether this size is greater than the other size.

        :param FileSize other: The other size.
        """
        return int(self) > int(FileSize(other))

    def __ge__(self, other) -> bool:
        """
        Returns whether this size is greater than or equal to the other size.

        :param FileSize other: The other size.
        """
        return int(self) >= int(FileSize(other))
welchbj/sublemon
|
sublemon/subprocess.py
|
SublemonSubprocess.spawn
|
python
|
async def spawn(self):
self._server._pending_set.add(self)
await self._server._sem.acquire()
self._subprocess = await asyncio.create_subprocess_shell(
self._cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
self._began_at = datetime.now()
if self in self._server._pending_set:
self._server._pending_set.remove(self)
self._server._running_set.add(self)
self._began_running_evt.set()
|
Spawn the command wrapped in this object as a subprocess.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/subprocess.py#L49-L61
| null |
class SublemonSubprocess:
    """Logical encapsulation of a subprocess."""

    def __init__(self, server: 'Sublemon', cmd: str) -> None:
        self._server = server
        self._cmd = cmd
        self._scheduled_at = datetime.now()
        self._uuid = uuid.uuid4()
        self._began_at: Optional[datetime] = None
        self._exit_code: Optional[int] = None
        self._subprocess: Optional[asyncio.subprocess.Process] = None
        # Lifecycle events: pending -> running -> done.
        self._began_running_evt = asyncio.Event()
        self._done_running_evt = asyncio.Event()

    def __repr__(self) -> str:
        return '<SublemonSubprocess [{}]>'.format(str(self))

    def __str__(self) -> str:
        return '{} -> `{}`'.format(self._scheduled_at, self._cmd)

    def __hash__(self) -> int:
        # Hash the same fields that __eq__ compares: the original hashed
        # (_cmd, _uuid) while __eq__ compares (_cmd, _scheduled_at), so two
        # equal objects could land in different hash buckets - breaking the
        # invariant required by sets and dicts (e.g. the server's sets).
        return hash((self._cmd, self._scheduled_at,))

    def __eq__(self, other) -> bool:
        return (self._cmd == other._cmd and
                self._scheduled_at == other._scheduled_at)

    def __ne__(self, other) -> bool:
        return not (self == other)

    async def spawn(self):
        """Spawn the command wrapped in this object as a subprocess.

        Restored: wait_running()/wait_done() block on events that only this
        coroutine sets, so the class was inert without it.
        """
        self._server._pending_set.add(self)
        # Respect the server's concurrency limit.
        await self._server._sem.acquire()
        self._subprocess = await asyncio.create_subprocess_shell(
            self._cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)
        self._began_at = datetime.now()
        if self in self._server._pending_set:
            self._server._pending_set.remove(self)
        self._server._running_set.add(self)
        self._began_running_evt.set()

    async def wait_running(self) -> None:
        """Coroutine to wait for this subprocess to begin execution."""
        await self._began_running_evt.wait()

    async def wait_done(self) -> int:
        """Coroutine to wait for subprocess run completion.

        Returns:
            The exit code of the subprocess.

        """
        await self._done_running_evt.wait()
        if self._exit_code is None:
            raise SublemonLifetimeError(
                'Subprocess exited abnormally with `None` exit code')
        return self._exit_code

    def _poll(self) -> None:
        """Check the status of the wrapped running subprocess.

        Note:
            This should only be called on currently-running tasks.

        """
        if self._subprocess is None:
            raise SublemonLifetimeError(
                'Attempted to poll a non-active subprocess')
        elif self._subprocess.returncode is not None:
            self._exit_code = self._subprocess.returncode
            self._done_running_evt.set()
            self._server._running_set.remove(self)
            self._server._sem.release()

    @property
    async def stdout(self) -> AsyncGenerator[str, None]:
        """Asynchronous generator for lines from subprocess stdout."""
        await self.wait_running()
        async for line in self._subprocess.stdout:  # type: ignore
            yield line

    @property
    async def stderr(self) -> AsyncGenerator[str, None]:
        """Asynchronous generator for lines from subprocess stderr."""
        await self.wait_running()
        async for line in self._subprocess.stderr:  # type: ignore
            yield line

    @property
    def cmd(self) -> str:
        """The shell command that this subprocess will/is/did run."""
        return self._cmd

    @property
    def exit_code(self) -> Optional[int]:
        """The exit code of this subprocess."""
        return self._exit_code

    @property
    def is_pending(self) -> bool:
        """Whether this subprocess is waiting to run."""
        return not self._began_running_evt.is_set()

    @property
    def is_running(self) -> bool:
        """Whether this subprocess is currently running."""
        return (self._began_running_evt.is_set() and
                not self._done_running_evt.is_set())

    @property
    def is_done(self) -> bool:
        """Whether this subprocess has completed."""
        return self._done_running_evt.is_set()

    @property
    def scheduled_at(self) -> datetime:
        """The time this object was scheduled on the server."""
        return self._scheduled_at

    @property
    def began_at(self) -> Optional[datetime]:
        """The time the subprocess began execution.

        Note:
            This will be `None` until the subprocess has actually begun
            execution.

        """
        return self._began_at
welchbj/sublemon
|
sublemon/subprocess.py
|
SublemonSubprocess.wait_done
|
python
|
async def wait_done(self) -> int:
await self._done_running_evt.wait()
if self._exit_code is None:
raise SublemonLifetimeError(
'Subprocess exited abnormally with `None` exit code')
return self._exit_code
|
Coroutine to wait for subprocess run completion.
Returns:
The exit code of the subprocess.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/subprocess.py#L67-L78
| null |
class SublemonSubprocess:
"""Logical encapsulation of a subprocess."""
def __init__(self, server: 'Sublemon', cmd: str) -> None:
self._server = server
self._cmd = cmd
self._scheduled_at = datetime.now()
self._uuid = uuid.uuid4()
self._began_at: Optional[datetime] = None
self._exit_code: Optional[int] = None
self._subprocess: Optional[asyncio.subprocess.Process] = None
self._began_running_evt = asyncio.Event()
self._done_running_evt = asyncio.Event()
def __repr__(self) -> str:
return '<SublemonSubprocess [{}]>'.format(str(self))
def __str__(self) -> str:
return '{} -> `{}`'.format(self._scheduled_at, self._cmd)
def __hash__(self) -> int:
return hash((self._cmd, self._uuid,))
def __eq__(self, other) -> bool:
return (self._cmd == other._cmd and
self._scheduled_at == other._scheduled_at)
def __ne__(self, other) -> bool:
return not (self == other)
async def spawn(self):
"""Spawn the command wrapped in this object as a subprocess."""
self._server._pending_set.add(self)
await self._server._sem.acquire()
self._subprocess = await asyncio.create_subprocess_shell(
self._cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
self._began_at = datetime.now()
if self in self._server._pending_set:
self._server._pending_set.remove(self)
self._server._running_set.add(self)
self._began_running_evt.set()
async def wait_running(self) -> None:
"""Coroutine to wait for this subprocess to begin execution."""
await self._began_running_evt.wait()
def _poll(self) -> None:
"""Check the status of the wrapped running subprocess.
Note:
This should only be called on currently-running tasks.
"""
if self._subprocess is None:
raise SublemonLifetimeError(
'Attempted to poll a non-active subprocess')
elif self._subprocess.returncode is not None:
self._exit_code = self._subprocess.returncode
self._done_running_evt.set()
self._server._running_set.remove(self)
self._server._sem.release()
@property
async def stdout(self) -> AsyncGenerator[str, None]:
"""Asynchronous generator for lines from subprocess stdout."""
await self.wait_running()
async for line in self._subprocess.stdout: # type: ignore
yield line
@property
async def stderr(self) -> AsyncGenerator[str, None]:
"""Asynchronous generator for lines from subprocess stderr."""
await self.wait_running()
async for line in self._subprocess.stderr: # type: ignore
yield line
@property
def cmd(self) -> str:
"""The shell command that this subprocess will/is/did run."""
return self._cmd
@property
def exit_code(self) -> Optional[int]:
"""The exit code of this subprocess."""
return self._exit_code
@property
def is_pending(self) -> bool:
"""Whether this subprocess is waiting to run."""
return not self._began_running_evt.is_set()
@property
def is_running(self) -> bool:
"""Whether this subprocess is currently running."""
return (self._began_running_evt.is_set() and
not self._done_running_evt.is_set())
@property
def is_done(self) -> bool:
"""Whether this subprocess has completed."""
return self._done_running_evt.is_set()
@property
def scheduled_at(self) -> datetime:
"""The time this object was scheduled on the server."""
return self._scheduled_at
@property
def began_at(self) -> Optional[datetime]:
"""The time the subprocess began execution.
Note:
This will be `None` until the subprocess has actually begun
execution.
"""
return self._began_at
|
welchbj/sublemon
|
sublemon/subprocess.py
|
SublemonSubprocess._poll
|
python
|
def _poll(self) -> None:
if self._subprocess is None:
raise SublemonLifetimeError(
'Attempted to poll a non-active subprocess')
elif self._subprocess.returncode is not None:
self._exit_code = self._subprocess.returncode
self._done_running_evt.set()
self._server._running_set.remove(self)
self._server._sem.release()
|
Check the status of the wrapped running subprocess.
Note:
This should only be called on currently-running tasks.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/subprocess.py#L80-L94
| null |
class SublemonSubprocess:
"""Logical encapsulation of a subprocess."""
def __init__(self, server: 'Sublemon', cmd: str) -> None:
self._server = server
self._cmd = cmd
self._scheduled_at = datetime.now()
self._uuid = uuid.uuid4()
self._began_at: Optional[datetime] = None
self._exit_code: Optional[int] = None
self._subprocess: Optional[asyncio.subprocess.Process] = None
self._began_running_evt = asyncio.Event()
self._done_running_evt = asyncio.Event()
def __repr__(self) -> str:
return '<SublemonSubprocess [{}]>'.format(str(self))
def __str__(self) -> str:
return '{} -> `{}`'.format(self._scheduled_at, self._cmd)
def __hash__(self) -> int:
return hash((self._cmd, self._uuid,))
def __eq__(self, other) -> bool:
return (self._cmd == other._cmd and
self._scheduled_at == other._scheduled_at)
def __ne__(self, other) -> bool:
return not (self == other)
async def spawn(self):
"""Spawn the command wrapped in this object as a subprocess."""
self._server._pending_set.add(self)
await self._server._sem.acquire()
self._subprocess = await asyncio.create_subprocess_shell(
self._cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
self._began_at = datetime.now()
if self in self._server._pending_set:
self._server._pending_set.remove(self)
self._server._running_set.add(self)
self._began_running_evt.set()
async def wait_running(self) -> None:
"""Coroutine to wait for this subprocess to begin execution."""
await self._began_running_evt.wait()
async def wait_done(self) -> int:
"""Coroutine to wait for subprocess run completion.
Returns:
The exit code of the subprocess.
"""
await self._done_running_evt.wait()
if self._exit_code is None:
raise SublemonLifetimeError(
'Subprocess exited abnormally with `None` exit code')
return self._exit_code
@property
async def stdout(self) -> AsyncGenerator[str, None]:
"""Asynchronous generator for lines from subprocess stdout."""
await self.wait_running()
async for line in self._subprocess.stdout: # type: ignore
yield line
@property
async def stderr(self) -> AsyncGenerator[str, None]:
"""Asynchronous generator for lines from subprocess stderr."""
await self.wait_running()
async for line in self._subprocess.stderr: # type: ignore
yield line
@property
def cmd(self) -> str:
"""The shell command that this subprocess will/is/did run."""
return self._cmd
@property
def exit_code(self) -> Optional[int]:
"""The exit code of this subprocess."""
return self._exit_code
@property
def is_pending(self) -> bool:
"""Whether this subprocess is waiting to run."""
return not self._began_running_evt.is_set()
@property
def is_running(self) -> bool:
"""Whether this subprocess is currently running."""
return (self._began_running_evt.is_set() and
not self._done_running_evt.is_set())
@property
def is_done(self) -> bool:
"""Whether this subprocess has completed."""
return self._done_running_evt.is_set()
@property
def scheduled_at(self) -> datetime:
"""The time this object was scheduled on the server."""
return self._scheduled_at
@property
def began_at(self) -> Optional[datetime]:
"""The time the subprocess began execution.
Note:
This will be `None` until the subprocess has actually begun
execution.
"""
return self._began_at
|
welchbj/sublemon
|
sublemon/subprocess.py
|
SublemonSubprocess.stdout
|
python
|
async def stdout(self) -> AsyncGenerator[str, None]:
await self.wait_running()
async for line in self._subprocess.stdout: # type: ignore
yield line
|
Asynchronous generator for lines from subprocess stdout.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/subprocess.py#L97-L101
|
[
"async def wait_running(self) -> None:\n \"\"\"Coroutine to wait for this subprocess to begin execution.\"\"\"\n await self._began_running_evt.wait()\n"
] |
class SublemonSubprocess:
"""Logical encapsulation of a subprocess."""
def __init__(self, server: 'Sublemon', cmd: str) -> None:
self._server = server
self._cmd = cmd
self._scheduled_at = datetime.now()
self._uuid = uuid.uuid4()
self._began_at: Optional[datetime] = None
self._exit_code: Optional[int] = None
self._subprocess: Optional[asyncio.subprocess.Process] = None
self._began_running_evt = asyncio.Event()
self._done_running_evt = asyncio.Event()
def __repr__(self) -> str:
return '<SublemonSubprocess [{}]>'.format(str(self))
def __str__(self) -> str:
return '{} -> `{}`'.format(self._scheduled_at, self._cmd)
def __hash__(self) -> int:
return hash((self._cmd, self._uuid,))
def __eq__(self, other) -> bool:
return (self._cmd == other._cmd and
self._scheduled_at == other._scheduled_at)
def __ne__(self, other) -> bool:
return not (self == other)
async def spawn(self):
"""Spawn the command wrapped in this object as a subprocess."""
self._server._pending_set.add(self)
await self._server._sem.acquire()
self._subprocess = await asyncio.create_subprocess_shell(
self._cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
self._began_at = datetime.now()
if self in self._server._pending_set:
self._server._pending_set.remove(self)
self._server._running_set.add(self)
self._began_running_evt.set()
async def wait_running(self) -> None:
"""Coroutine to wait for this subprocess to begin execution."""
await self._began_running_evt.wait()
async def wait_done(self) -> int:
"""Coroutine to wait for subprocess run completion.
Returns:
The exit code of the subprocess.
"""
await self._done_running_evt.wait()
if self._exit_code is None:
raise SublemonLifetimeError(
'Subprocess exited abnormally with `None` exit code')
return self._exit_code
def _poll(self) -> None:
"""Check the status of the wrapped running subprocess.
Note:
This should only be called on currently-running tasks.
"""
if self._subprocess is None:
raise SublemonLifetimeError(
'Attempted to poll a non-active subprocess')
elif self._subprocess.returncode is not None:
self._exit_code = self._subprocess.returncode
self._done_running_evt.set()
self._server._running_set.remove(self)
self._server._sem.release()
@property
@property
async def stderr(self) -> AsyncGenerator[str, None]:
"""Asynchronous generator for lines from subprocess stderr."""
await self.wait_running()
async for line in self._subprocess.stderr: # type: ignore
yield line
@property
def cmd(self) -> str:
"""The shell command that this subprocess will/is/did run."""
return self._cmd
@property
def exit_code(self) -> Optional[int]:
"""The exit code of this subprocess."""
return self._exit_code
@property
def is_pending(self) -> bool:
"""Whether this subprocess is waiting to run."""
return not self._began_running_evt.is_set()
@property
def is_running(self) -> bool:
"""Whether this subprocess is currently running."""
return (self._began_running_evt.is_set() and
not self._done_running_evt.is_set())
@property
def is_done(self) -> bool:
"""Whether this subprocess has completed."""
return self._done_running_evt.is_set()
@property
def scheduled_at(self) -> datetime:
"""The time this object was scheduled on the server."""
return self._scheduled_at
@property
def began_at(self) -> Optional[datetime]:
"""The time the subprocess began execution.
Note:
This will be `None` until the subprocess has actually begun
execution.
"""
return self._began_at
|
welchbj/sublemon
|
sublemon/subprocess.py
|
SublemonSubprocess.stderr
|
python
|
async def stderr(self) -> AsyncGenerator[str, None]:
await self.wait_running()
async for line in self._subprocess.stderr: # type: ignore
yield line
|
Asynchronous generator for lines from subprocess stderr.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/subprocess.py#L104-L108
|
[
"async def wait_running(self) -> None:\n \"\"\"Coroutine to wait for this subprocess to begin execution.\"\"\"\n await self._began_running_evt.wait()\n"
] |
class SublemonSubprocess:
"""Logical encapsulation of a subprocess."""
def __init__(self, server: 'Sublemon', cmd: str) -> None:
self._server = server
self._cmd = cmd
self._scheduled_at = datetime.now()
self._uuid = uuid.uuid4()
self._began_at: Optional[datetime] = None
self._exit_code: Optional[int] = None
self._subprocess: Optional[asyncio.subprocess.Process] = None
self._began_running_evt = asyncio.Event()
self._done_running_evt = asyncio.Event()
def __repr__(self) -> str:
return '<SublemonSubprocess [{}]>'.format(str(self))
def __str__(self) -> str:
return '{} -> `{}`'.format(self._scheduled_at, self._cmd)
def __hash__(self) -> int:
return hash((self._cmd, self._uuid,))
def __eq__(self, other) -> bool:
return (self._cmd == other._cmd and
self._scheduled_at == other._scheduled_at)
def __ne__(self, other) -> bool:
return not (self == other)
async def spawn(self):
"""Spawn the command wrapped in this object as a subprocess."""
self._server._pending_set.add(self)
await self._server._sem.acquire()
self._subprocess = await asyncio.create_subprocess_shell(
self._cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
self._began_at = datetime.now()
if self in self._server._pending_set:
self._server._pending_set.remove(self)
self._server._running_set.add(self)
self._began_running_evt.set()
async def wait_running(self) -> None:
"""Coroutine to wait for this subprocess to begin execution."""
await self._began_running_evt.wait()
async def wait_done(self) -> int:
"""Coroutine to wait for subprocess run completion.
Returns:
The exit code of the subprocess.
"""
await self._done_running_evt.wait()
if self._exit_code is None:
raise SublemonLifetimeError(
'Subprocess exited abnormally with `None` exit code')
return self._exit_code
def _poll(self) -> None:
"""Check the status of the wrapped running subprocess.
Note:
This should only be called on currently-running tasks.
"""
if self._subprocess is None:
raise SublemonLifetimeError(
'Attempted to poll a non-active subprocess')
elif self._subprocess.returncode is not None:
self._exit_code = self._subprocess.returncode
self._done_running_evt.set()
self._server._running_set.remove(self)
self._server._sem.release()
@property
async def stdout(self) -> AsyncGenerator[str, None]:
"""Asynchronous generator for lines from subprocess stdout."""
await self.wait_running()
async for line in self._subprocess.stdout: # type: ignore
yield line
@property
@property
def cmd(self) -> str:
"""The shell command that this subprocess will/is/did run."""
return self._cmd
@property
def exit_code(self) -> Optional[int]:
"""The exit code of this subprocess."""
return self._exit_code
@property
def is_pending(self) -> bool:
"""Whether this subprocess is waiting to run."""
return not self._began_running_evt.is_set()
@property
def is_running(self) -> bool:
"""Whether this subprocess is currently running."""
return (self._began_running_evt.is_set() and
not self._done_running_evt.is_set())
@property
def is_done(self) -> bool:
"""Whether this subprocess has completed."""
return self._done_running_evt.is_set()
@property
def scheduled_at(self) -> datetime:
"""The time this object was scheduled on the server."""
return self._scheduled_at
@property
def began_at(self) -> Optional[datetime]:
"""The time the subprocess began execution.
Note:
This will be `None` until the subprocess has actually begun
execution.
"""
return self._began_at
|
welchbj/sublemon
|
demos/from_the_readme.py
|
main
|
python
|
async def main():
for c in (1, 2, 4,):
async with Sublemon(max_concurrency=c) as s:
start = time.perf_counter()
await asyncio.gather(one(s), two(s))
end = time.perf_counter()
print('Limiting to', c, 'concurrent subprocess(es) took',
end-start, 'seconds\n')
|
`sublemon` library example!
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/demos/from_the_readme.py#L12-L20
|
[
"async def one(s: Sublemon):\n \"\"\"Spin up some subprocesses, sleep, and echo a message for this coro.\"\"\"\n shell_cmds = [\n 'sleep 1 && echo subprocess 1 in coroutine one',\n 'sleep 1 && echo subprocess 2 in coroutine one']\n async for line in s.iter_lines(*shell_cmds):\n print(line)\n",
"async def two(s: Sublemon):\n \"\"\"Spin up some subprocesses, sleep, and echo a message for this coro.\"\"\"\n subprocess_1, subprocess_2 = s.spawn(\n 'sleep 1 && echo subprocess 1 in coroutine two',\n 'sleep 1 && echo subprocess 2 in coroutine two')\n async for line in amerge(subprocess_1.stdout, subprocess_2.stdout):\n print(line.decode('utf-8'), end='')\n"
] |
"""Demo from the README."""
import asyncio
import time
from sublemon import (
amerge,
crossplat_loop_run,
Sublemon)
async def one(s: Sublemon):
"""Spin up some subprocesses, sleep, and echo a message for this coro."""
shell_cmds = [
'sleep 1 && echo subprocess 1 in coroutine one',
'sleep 1 && echo subprocess 2 in coroutine one']
async for line in s.iter_lines(*shell_cmds):
print(line)
async def two(s: Sublemon):
"""Spin up some subprocesses, sleep, and echo a message for this coro."""
subprocess_1, subprocess_2 = s.spawn(
'sleep 1 && echo subprocess 1 in coroutine two',
'sleep 1 && echo subprocess 2 in coroutine two')
async for line in amerge(subprocess_1.stdout, subprocess_2.stdout):
print(line.decode('utf-8'), end='')
if __name__ == '__main__':
crossplat_loop_run(main())
|
welchbj/sublemon
|
demos/from_the_readme.py
|
one
|
python
|
async def one(s: Sublemon):
shell_cmds = [
'sleep 1 && echo subprocess 1 in coroutine one',
'sleep 1 && echo subprocess 2 in coroutine one']
async for line in s.iter_lines(*shell_cmds):
print(line)
|
Spin up some subprocesses, sleep, and echo a message for this coro.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/demos/from_the_readme.py#L23-L29
| null |
"""Demo from the README."""
import asyncio
import time
from sublemon import (
amerge,
crossplat_loop_run,
Sublemon)
async def main():
"""`sublemon` library example!"""
for c in (1, 2, 4,):
async with Sublemon(max_concurrency=c) as s:
start = time.perf_counter()
await asyncio.gather(one(s), two(s))
end = time.perf_counter()
print('Limiting to', c, 'concurrent subprocess(es) took',
end-start, 'seconds\n')
async def two(s: Sublemon):
"""Spin up some subprocesses, sleep, and echo a message for this coro."""
subprocess_1, subprocess_2 = s.spawn(
'sleep 1 && echo subprocess 1 in coroutine two',
'sleep 1 && echo subprocess 2 in coroutine two')
async for line in amerge(subprocess_1.stdout, subprocess_2.stdout):
print(line.decode('utf-8'), end='')
if __name__ == '__main__':
crossplat_loop_run(main())
|
welchbj/sublemon
|
demos/from_the_readme.py
|
two
|
python
|
async def two(s: Sublemon):
subprocess_1, subprocess_2 = s.spawn(
'sleep 1 && echo subprocess 1 in coroutine two',
'sleep 1 && echo subprocess 2 in coroutine two')
async for line in amerge(subprocess_1.stdout, subprocess_2.stdout):
print(line.decode('utf-8'), end='')
|
Spin up some subprocesses, sleep, and echo a message for this coro.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/demos/from_the_readme.py#L32-L38
|
[
"async def amerge(*agens) -> AsyncGenerator[Any, None]:\n \"\"\"Thin wrapper around aiostream.stream.merge.\"\"\"\n xs = stream.merge(*agens)\n async with xs.stream() as streamer:\n async for x in streamer:\n yield x\n"
] |
"""Demo from the README."""
import asyncio
import time
from sublemon import (
amerge,
crossplat_loop_run,
Sublemon)
async def main():
"""`sublemon` library example!"""
for c in (1, 2, 4,):
async with Sublemon(max_concurrency=c) as s:
start = time.perf_counter()
await asyncio.gather(one(s), two(s))
end = time.perf_counter()
print('Limiting to', c, 'concurrent subprocess(es) took',
end-start, 'seconds\n')
async def one(s: Sublemon):
"""Spin up some subprocesses, sleep, and echo a message for this coro."""
shell_cmds = [
'sleep 1 && echo subprocess 1 in coroutine one',
'sleep 1 && echo subprocess 2 in coroutine one']
async for line in s.iter_lines(*shell_cmds):
print(line)
if __name__ == '__main__':
crossplat_loop_run(main())
|
welchbj/sublemon
|
sublemon/utils.py
|
amerge
|
python
|
async def amerge(*agens) -> AsyncGenerator[Any, None]:
xs = stream.merge(*agens)
async with xs.stream() as streamer:
async for x in streamer:
yield x
|
Thin wrapper around aiostream.stream.merge.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/utils.py#L14-L19
| null |
"""Asynchronous / misc utilities."""
import asyncio
import contextlib
import signal
import sys
from aiostream import stream
from typing import (
Any,
AsyncGenerator)
def crossplat_loop_run(coro) -> Any:
"""Cross-platform method for running a subprocess-spawning coroutine."""
if sys.platform == 'win32':
signal.signal(signal.SIGINT, signal.SIG_DFL)
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
with contextlib.closing(loop):
return loop.run_until_complete(coro)
|
welchbj/sublemon
|
sublemon/utils.py
|
crossplat_loop_run
|
python
|
def crossplat_loop_run(coro) -> Any:
if sys.platform == 'win32':
signal.signal(signal.SIGINT, signal.SIG_DFL)
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
with contextlib.closing(loop):
return loop.run_until_complete(coro)
|
Cross-platform method for running a subprocess-spawning coroutine.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/utils.py#L22-L32
| null |
"""Asynchronous / misc utilities."""
import asyncio
import contextlib
import signal
import sys
from aiostream import stream
from typing import (
Any,
AsyncGenerator)
async def amerge(*agens) -> AsyncGenerator[Any, None]:
"""Thin wrapper around aiostream.stream.merge."""
xs = stream.merge(*agens)
async with xs.stream() as streamer:
async for x in streamer:
yield x
|
welchbj/sublemon
|
sublemon/runtime.py
|
Sublemon.start
|
python
|
async def start(self) -> None:
if self._is_running:
raise SublemonRuntimeError(
'Attempted to start an already-running `Sublemon` instance')
self._poll_task = asyncio.ensure_future(self._poll())
self._is_running = True
|
Coroutine to run this server.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/runtime.py#L52-L59
|
[
"async def _poll(self) -> None:\n \"\"\"Coroutine to poll status of running subprocesses.\"\"\"\n while True:\n await asyncio.sleep(self._poll_delta)\n for subproc in list(self._running_set):\n subproc._poll()\n"
] |
class Sublemon:
"""The runtime for spawning subprocesses."""
def __init__(self, max_concurrency: int=_DEFAULT_MC,
poll_delta: float=_DEFAULT_PD) -> None:
self._max_concurrency = max_concurrency
self._poll_delta = poll_delta
self._sem = asyncio.BoundedSemaphore(max_concurrency)
self._is_running = False
self._pending_set: Set[SublemonSubprocess] = set()
self._running_set: Set[SublemonSubprocess] = set()
def __str__(self):
return ('max concurrency: {}, poll delta: {}, {} running and {} '
'pending subprocesses').format(
self._max_concurrency,
self._poll_delta,
len(self._running_set),
len(self._pending_set))
def __repr__(self):
return '<Sublemon [{}]>'.format(str(self))
async def __aenter__(self):
await self.start()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.stop()
async def stop(self) -> None:
"""Coroutine to stop execution of this server."""
if not self._is_running:
raise SublemonRuntimeError(
'Attempted to stop an already-stopped `Sublemon` instance')
await self.block()
self._poll_task.cancel()
self._is_running = False
with suppress(asyncio.CancelledError):
await self._poll_task
async def _poll(self) -> None:
"""Coroutine to poll status of running subprocesses."""
while True:
await asyncio.sleep(self._poll_delta)
for subproc in list(self._running_set):
subproc._poll()
async def iter_lines(
self,
*cmds: str,
stream: str='both') -> AsyncGenerator[str, None]:
"""Coroutine to spawn commands and yield text lines from stdout."""
sps = self.spawn(*cmds)
if stream == 'both':
agen = amerge(
amerge(*[sp.stdout for sp in sps]),
amerge(*[sp.stderr for sp in sps]))
elif stream == 'stdout':
agen = amerge(*[sp.stdout for sp in sps])
elif stream == 'stderr':
agen = amerge(*[sp.stderr for sp in sps])
else:
raise SublemonRuntimeError(
'Invalid `stream` kwarg received: `' + str(stream) + '`')
async for line in agen:
yield line.decode('utf-8').rstrip()
async def gather(self, *cmds: str) -> Tuple[int]:
"""Coroutine to spawn subprocesses and block until completion.
Note:
The same `max_concurrency` restriction that applies to `spawn`
also applies here.
Returns:
The exit codes of the spawned subprocesses, in the order they were
passed.
"""
subprocs = self.spawn(*cmds)
subproc_wait_coros = [subproc.wait_done() for subproc in subprocs]
return await asyncio.gather(*subproc_wait_coros) # type: ignore
async def block(self) -> None:
"""Block until all running and pending subprocesses have finished."""
await asyncio.gather(
*itertools.chain(
(sp.wait_done() for sp in self._running_set),
(sp.wait_done() for sp in self._pending_set)))
def spawn(self, *cmds: str) -> List[SublemonSubprocess]:
"""Coroutine to spawn shell commands.
If `max_concurrency` is reached during the attempt to spawn the
specified subprocesses, excess subprocesses will block while attempting
to acquire this server's semaphore.
"""
if not self._is_running:
raise SublemonRuntimeError(
'Attempted to spawn subprocesses from a non-started server')
subprocs = [SublemonSubprocess(self, cmd) for cmd in cmds]
for sp in subprocs:
asyncio.ensure_future(sp.spawn())
return subprocs
@property
def running_subprocesses(self) -> Set[SublemonSubprocess]:
"""Get the currently-executing subprocesses."""
return self._running_set
@property
def pending_subprocesses(self) -> Set[SublemonSubprocess]:
"""Get the subprocesses waiting to begin execution."""
return self._pending_set
@property
def max_concurrency(self) -> int:
"""The max number of subprocesses that can be running concurrently."""
return self._max_concurrency
@property
def poll_delta(self) -> float:
"""The number of seconds to sleep in between polls of subprocesses."""
return self._poll_delta
|
welchbj/sublemon
|
sublemon/runtime.py
|
Sublemon.stop
|
python
|
async def stop(self) -> None:
if not self._is_running:
raise SublemonRuntimeError(
'Attempted to stop an already-stopped `Sublemon` instance')
await self.block()
self._poll_task.cancel()
self._is_running = False
with suppress(asyncio.CancelledError):
await self._poll_task
|
Coroutine to stop execution of this server.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/runtime.py#L61-L71
|
[
"async def block(self) -> None:\n \"\"\"Block until all running and pending subprocesses have finished.\"\"\"\n await asyncio.gather(\n *itertools.chain(\n (sp.wait_done() for sp in self._running_set),\n (sp.wait_done() for sp in self._pending_set)))\n"
] |
class Sublemon:
"""The runtime for spawning subprocesses."""
def __init__(self, max_concurrency: int=_DEFAULT_MC,
poll_delta: float=_DEFAULT_PD) -> None:
self._max_concurrency = max_concurrency
self._poll_delta = poll_delta
self._sem = asyncio.BoundedSemaphore(max_concurrency)
self._is_running = False
self._pending_set: Set[SublemonSubprocess] = set()
self._running_set: Set[SublemonSubprocess] = set()
def __str__(self):
return ('max concurrency: {}, poll delta: {}, {} running and {} '
'pending subprocesses').format(
self._max_concurrency,
self._poll_delta,
len(self._running_set),
len(self._pending_set))
def __repr__(self):
return '<Sublemon [{}]>'.format(str(self))
async def __aenter__(self):
await self.start()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.stop()
async def start(self) -> None:
"""Coroutine to run this server."""
if self._is_running:
raise SublemonRuntimeError(
'Attempted to start an already-running `Sublemon` instance')
self._poll_task = asyncio.ensure_future(self._poll())
self._is_running = True
async def _poll(self) -> None:
"""Coroutine to poll status of running subprocesses."""
while True:
await asyncio.sleep(self._poll_delta)
for subproc in list(self._running_set):
subproc._poll()
async def iter_lines(
self,
*cmds: str,
stream: str='both') -> AsyncGenerator[str, None]:
"""Coroutine to spawn commands and yield text lines from stdout."""
sps = self.spawn(*cmds)
if stream == 'both':
agen = amerge(
amerge(*[sp.stdout for sp in sps]),
amerge(*[sp.stderr for sp in sps]))
elif stream == 'stdout':
agen = amerge(*[sp.stdout for sp in sps])
elif stream == 'stderr':
agen = amerge(*[sp.stderr for sp in sps])
else:
raise SublemonRuntimeError(
'Invalid `stream` kwarg received: `' + str(stream) + '`')
async for line in agen:
yield line.decode('utf-8').rstrip()
async def gather(self, *cmds: str) -> Tuple[int]:
"""Coroutine to spawn subprocesses and block until completion.
Note:
The same `max_concurrency` restriction that applies to `spawn`
also applies here.
Returns:
The exit codes of the spawned subprocesses, in the order they were
passed.
"""
subprocs = self.spawn(*cmds)
subproc_wait_coros = [subproc.wait_done() for subproc in subprocs]
return await asyncio.gather(*subproc_wait_coros) # type: ignore
async def block(self) -> None:
"""Block until all running and pending subprocesses have finished."""
await asyncio.gather(
*itertools.chain(
(sp.wait_done() for sp in self._running_set),
(sp.wait_done() for sp in self._pending_set)))
def spawn(self, *cmds: str) -> List[SublemonSubprocess]:
"""Coroutine to spawn shell commands.
If `max_concurrency` is reached during the attempt to spawn the
specified subprocesses, excess subprocesses will block while attempting
to acquire this server's semaphore.
"""
if not self._is_running:
raise SublemonRuntimeError(
'Attempted to spawn subprocesses from a non-started server')
subprocs = [SublemonSubprocess(self, cmd) for cmd in cmds]
for sp in subprocs:
asyncio.ensure_future(sp.spawn())
return subprocs
@property
def running_subprocesses(self) -> Set[SublemonSubprocess]:
"""Get the currently-executing subprocesses."""
return self._running_set
@property
def pending_subprocesses(self) -> Set[SublemonSubprocess]:
"""Get the subprocesses waiting to begin execution."""
return self._pending_set
@property
def max_concurrency(self) -> int:
"""The max number of subprocesses that can be running concurrently."""
return self._max_concurrency
@property
def poll_delta(self) -> float:
"""The number of seconds to sleep in between polls of subprocesses."""
return self._poll_delta
|
welchbj/sublemon
|
sublemon/runtime.py
|
Sublemon._poll
|
python
|
async def _poll(self) -> None:
while True:
await asyncio.sleep(self._poll_delta)
for subproc in list(self._running_set):
subproc._poll()
|
Coroutine to poll status of running subprocesses.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/runtime.py#L73-L78
| null |
class Sublemon:
"""The runtime for spawning subprocesses."""
def __init__(self, max_concurrency: int=_DEFAULT_MC,
poll_delta: float=_DEFAULT_PD) -> None:
self._max_concurrency = max_concurrency
self._poll_delta = poll_delta
self._sem = asyncio.BoundedSemaphore(max_concurrency)
self._is_running = False
self._pending_set: Set[SublemonSubprocess] = set()
self._running_set: Set[SublemonSubprocess] = set()
def __str__(self):
return ('max concurrency: {}, poll delta: {}, {} running and {} '
'pending subprocesses').format(
self._max_concurrency,
self._poll_delta,
len(self._running_set),
len(self._pending_set))
def __repr__(self):
return '<Sublemon [{}]>'.format(str(self))
async def __aenter__(self):
await self.start()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.stop()
async def start(self) -> None:
"""Coroutine to run this server."""
if self._is_running:
raise SublemonRuntimeError(
'Attempted to start an already-running `Sublemon` instance')
self._poll_task = asyncio.ensure_future(self._poll())
self._is_running = True
async def stop(self) -> None:
"""Coroutine to stop execution of this server."""
if not self._is_running:
raise SublemonRuntimeError(
'Attempted to stop an already-stopped `Sublemon` instance')
await self.block()
self._poll_task.cancel()
self._is_running = False
with suppress(asyncio.CancelledError):
await self._poll_task
async def iter_lines(
self,
*cmds: str,
stream: str='both') -> AsyncGenerator[str, None]:
"""Coroutine to spawn commands and yield text lines from stdout."""
sps = self.spawn(*cmds)
if stream == 'both':
agen = amerge(
amerge(*[sp.stdout for sp in sps]),
amerge(*[sp.stderr for sp in sps]))
elif stream == 'stdout':
agen = amerge(*[sp.stdout for sp in sps])
elif stream == 'stderr':
agen = amerge(*[sp.stderr for sp in sps])
else:
raise SublemonRuntimeError(
'Invalid `stream` kwarg received: `' + str(stream) + '`')
async for line in agen:
yield line.decode('utf-8').rstrip()
async def gather(self, *cmds: str) -> Tuple[int]:
"""Coroutine to spawn subprocesses and block until completion.
Note:
The same `max_concurrency` restriction that applies to `spawn`
also applies here.
Returns:
The exit codes of the spawned subprocesses, in the order they were
passed.
"""
subprocs = self.spawn(*cmds)
subproc_wait_coros = [subproc.wait_done() for subproc in subprocs]
return await asyncio.gather(*subproc_wait_coros) # type: ignore
async def block(self) -> None:
"""Block until all running and pending subprocesses have finished."""
await asyncio.gather(
*itertools.chain(
(sp.wait_done() for sp in self._running_set),
(sp.wait_done() for sp in self._pending_set)))
def spawn(self, *cmds: str) -> List[SublemonSubprocess]:
"""Coroutine to spawn shell commands.
If `max_concurrency` is reached during the attempt to spawn the
specified subprocesses, excess subprocesses will block while attempting
to acquire this server's semaphore.
"""
if not self._is_running:
raise SublemonRuntimeError(
'Attempted to spawn subprocesses from a non-started server')
subprocs = [SublemonSubprocess(self, cmd) for cmd in cmds]
for sp in subprocs:
asyncio.ensure_future(sp.spawn())
return subprocs
@property
def running_subprocesses(self) -> Set[SublemonSubprocess]:
"""Get the currently-executing subprocesses."""
return self._running_set
@property
def pending_subprocesses(self) -> Set[SublemonSubprocess]:
"""Get the subprocesses waiting to begin execution."""
return self._pending_set
@property
def max_concurrency(self) -> int:
"""The max number of subprocesses that can be running concurrently."""
return self._max_concurrency
@property
def poll_delta(self) -> float:
"""The number of seconds to sleep in between polls of subprocesses."""
return self._poll_delta
|
welchbj/sublemon
|
sublemon/runtime.py
|
Sublemon.iter_lines
|
python
|
async def iter_lines(
self,
*cmds: str,
stream: str='both') -> AsyncGenerator[str, None]:
sps = self.spawn(*cmds)
if stream == 'both':
agen = amerge(
amerge(*[sp.stdout for sp in sps]),
amerge(*[sp.stderr for sp in sps]))
elif stream == 'stdout':
agen = amerge(*[sp.stdout for sp in sps])
elif stream == 'stderr':
agen = amerge(*[sp.stderr for sp in sps])
else:
raise SublemonRuntimeError(
'Invalid `stream` kwarg received: `' + str(stream) + '`')
async for line in agen:
yield line.decode('utf-8').rstrip()
|
Coroutine to spawn commands and yield text lines from stdout.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/runtime.py#L80-L98
|
[
"async def amerge(*agens) -> AsyncGenerator[Any, None]:\n \"\"\"Thin wrapper around aiostream.stream.merge.\"\"\"\n xs = stream.merge(*agens)\n async with xs.stream() as streamer:\n async for x in streamer:\n yield x\n",
"def spawn(self, *cmds: str) -> List[SublemonSubprocess]:\n \"\"\"Coroutine to spawn shell commands.\n\n If `max_concurrency` is reached during the attempt to spawn the\n specified subprocesses, excess subprocesses will block while attempting\n to acquire this server's semaphore.\n\n \"\"\"\n if not self._is_running:\n raise SublemonRuntimeError(\n 'Attempted to spawn subprocesses from a non-started server')\n\n subprocs = [SublemonSubprocess(self, cmd) for cmd in cmds]\n for sp in subprocs:\n asyncio.ensure_future(sp.spawn())\n return subprocs\n"
] |
class Sublemon:
"""The runtime for spawning subprocesses."""
def __init__(self, max_concurrency: int=_DEFAULT_MC,
poll_delta: float=_DEFAULT_PD) -> None:
self._max_concurrency = max_concurrency
self._poll_delta = poll_delta
self._sem = asyncio.BoundedSemaphore(max_concurrency)
self._is_running = False
self._pending_set: Set[SublemonSubprocess] = set()
self._running_set: Set[SublemonSubprocess] = set()
def __str__(self):
return ('max concurrency: {}, poll delta: {}, {} running and {} '
'pending subprocesses').format(
self._max_concurrency,
self._poll_delta,
len(self._running_set),
len(self._pending_set))
def __repr__(self):
return '<Sublemon [{}]>'.format(str(self))
async def __aenter__(self):
await self.start()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.stop()
async def start(self) -> None:
"""Coroutine to run this server."""
if self._is_running:
raise SublemonRuntimeError(
'Attempted to start an already-running `Sublemon` instance')
self._poll_task = asyncio.ensure_future(self._poll())
self._is_running = True
async def stop(self) -> None:
"""Coroutine to stop execution of this server."""
if not self._is_running:
raise SublemonRuntimeError(
'Attempted to stop an already-stopped `Sublemon` instance')
await self.block()
self._poll_task.cancel()
self._is_running = False
with suppress(asyncio.CancelledError):
await self._poll_task
async def _poll(self) -> None:
"""Coroutine to poll status of running subprocesses."""
while True:
await asyncio.sleep(self._poll_delta)
for subproc in list(self._running_set):
subproc._poll()
async def gather(self, *cmds: str) -> Tuple[int]:
"""Coroutine to spawn subprocesses and block until completion.
Note:
The same `max_concurrency` restriction that applies to `spawn`
also applies here.
Returns:
The exit codes of the spawned subprocesses, in the order they were
passed.
"""
subprocs = self.spawn(*cmds)
subproc_wait_coros = [subproc.wait_done() for subproc in subprocs]
return await asyncio.gather(*subproc_wait_coros) # type: ignore
async def block(self) -> None:
"""Block until all running and pending subprocesses have finished."""
await asyncio.gather(
*itertools.chain(
(sp.wait_done() for sp in self._running_set),
(sp.wait_done() for sp in self._pending_set)))
def spawn(self, *cmds: str) -> List[SublemonSubprocess]:
"""Coroutine to spawn shell commands.
If `max_concurrency` is reached during the attempt to spawn the
specified subprocesses, excess subprocesses will block while attempting
to acquire this server's semaphore.
"""
if not self._is_running:
raise SublemonRuntimeError(
'Attempted to spawn subprocesses from a non-started server')
subprocs = [SublemonSubprocess(self, cmd) for cmd in cmds]
for sp in subprocs:
asyncio.ensure_future(sp.spawn())
return subprocs
@property
def running_subprocesses(self) -> Set[SublemonSubprocess]:
"""Get the currently-executing subprocesses."""
return self._running_set
@property
def pending_subprocesses(self) -> Set[SublemonSubprocess]:
"""Get the subprocesses waiting to begin execution."""
return self._pending_set
@property
def max_concurrency(self) -> int:
"""The max number of subprocesses that can be running concurrently."""
return self._max_concurrency
@property
def poll_delta(self) -> float:
"""The number of seconds to sleep in between polls of subprocesses."""
return self._poll_delta
|
welchbj/sublemon
|
sublemon/runtime.py
|
Sublemon.gather
|
python
|
async def gather(self, *cmds: str) -> Tuple[int]:
subprocs = self.spawn(*cmds)
subproc_wait_coros = [subproc.wait_done() for subproc in subprocs]
return await asyncio.gather(*subproc_wait_coros)
|
Coroutine to spawn subprocesses and block until completion.
Note:
The same `max_concurrency` restriction that applies to `spawn`
also applies here.
Returns:
The exit codes of the spawned subprocesses, in the order they were
passed.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/runtime.py#L100-L114
|
[
"def spawn(self, *cmds: str) -> List[SublemonSubprocess]:\n \"\"\"Coroutine to spawn shell commands.\n\n If `max_concurrency` is reached during the attempt to spawn the\n specified subprocesses, excess subprocesses will block while attempting\n to acquire this server's semaphore.\n\n \"\"\"\n if not self._is_running:\n raise SublemonRuntimeError(\n 'Attempted to spawn subprocesses from a non-started server')\n\n subprocs = [SublemonSubprocess(self, cmd) for cmd in cmds]\n for sp in subprocs:\n asyncio.ensure_future(sp.spawn())\n return subprocs\n"
] |
class Sublemon:
"""The runtime for spawning subprocesses."""
def __init__(self, max_concurrency: int=_DEFAULT_MC,
poll_delta: float=_DEFAULT_PD) -> None:
self._max_concurrency = max_concurrency
self._poll_delta = poll_delta
self._sem = asyncio.BoundedSemaphore(max_concurrency)
self._is_running = False
self._pending_set: Set[SublemonSubprocess] = set()
self._running_set: Set[SublemonSubprocess] = set()
def __str__(self):
return ('max concurrency: {}, poll delta: {}, {} running and {} '
'pending subprocesses').format(
self._max_concurrency,
self._poll_delta,
len(self._running_set),
len(self._pending_set))
def __repr__(self):
return '<Sublemon [{}]>'.format(str(self))
async def __aenter__(self):
await self.start()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.stop()
async def start(self) -> None:
"""Coroutine to run this server."""
if self._is_running:
raise SublemonRuntimeError(
'Attempted to start an already-running `Sublemon` instance')
self._poll_task = asyncio.ensure_future(self._poll())
self._is_running = True
async def stop(self) -> None:
"""Coroutine to stop execution of this server."""
if not self._is_running:
raise SublemonRuntimeError(
'Attempted to stop an already-stopped `Sublemon` instance')
await self.block()
self._poll_task.cancel()
self._is_running = False
with suppress(asyncio.CancelledError):
await self._poll_task
async def _poll(self) -> None:
"""Coroutine to poll status of running subprocesses."""
while True:
await asyncio.sleep(self._poll_delta)
for subproc in list(self._running_set):
subproc._poll()
async def iter_lines(
self,
*cmds: str,
stream: str='both') -> AsyncGenerator[str, None]:
"""Coroutine to spawn commands and yield text lines from stdout."""
sps = self.spawn(*cmds)
if stream == 'both':
agen = amerge(
amerge(*[sp.stdout for sp in sps]),
amerge(*[sp.stderr for sp in sps]))
elif stream == 'stdout':
agen = amerge(*[sp.stdout for sp in sps])
elif stream == 'stderr':
agen = amerge(*[sp.stderr for sp in sps])
else:
raise SublemonRuntimeError(
'Invalid `stream` kwarg received: `' + str(stream) + '`')
async for line in agen:
yield line.decode('utf-8').rstrip()
# type: ignore
async def block(self) -> None:
"""Block until all running and pending subprocesses have finished."""
await asyncio.gather(
*itertools.chain(
(sp.wait_done() for sp in self._running_set),
(sp.wait_done() for sp in self._pending_set)))
def spawn(self, *cmds: str) -> List[SublemonSubprocess]:
"""Coroutine to spawn shell commands.
If `max_concurrency` is reached during the attempt to spawn the
specified subprocesses, excess subprocesses will block while attempting
to acquire this server's semaphore.
"""
if not self._is_running:
raise SublemonRuntimeError(
'Attempted to spawn subprocesses from a non-started server')
subprocs = [SublemonSubprocess(self, cmd) for cmd in cmds]
for sp in subprocs:
asyncio.ensure_future(sp.spawn())
return subprocs
@property
def running_subprocesses(self) -> Set[SublemonSubprocess]:
"""Get the currently-executing subprocesses."""
return self._running_set
@property
def pending_subprocesses(self) -> Set[SublemonSubprocess]:
"""Get the subprocesses waiting to begin execution."""
return self._pending_set
@property
def max_concurrency(self) -> int:
"""The max number of subprocesses that can be running concurrently."""
return self._max_concurrency
@property
def poll_delta(self) -> float:
"""The number of seconds to sleep in between polls of subprocesses."""
return self._poll_delta
|
welchbj/sublemon
|
sublemon/runtime.py
|
Sublemon.block
|
python
|
async def block(self) -> None:
await asyncio.gather(
*itertools.chain(
(sp.wait_done() for sp in self._running_set),
(sp.wait_done() for sp in self._pending_set)))
|
Block until all running and pending subprocesses have finished.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/runtime.py#L116-L121
| null |
class Sublemon:
"""The runtime for spawning subprocesses."""
def __init__(self, max_concurrency: int=_DEFAULT_MC,
poll_delta: float=_DEFAULT_PD) -> None:
self._max_concurrency = max_concurrency
self._poll_delta = poll_delta
self._sem = asyncio.BoundedSemaphore(max_concurrency)
self._is_running = False
self._pending_set: Set[SublemonSubprocess] = set()
self._running_set: Set[SublemonSubprocess] = set()
def __str__(self):
return ('max concurrency: {}, poll delta: {}, {} running and {} '
'pending subprocesses').format(
self._max_concurrency,
self._poll_delta,
len(self._running_set),
len(self._pending_set))
def __repr__(self):
return '<Sublemon [{}]>'.format(str(self))
async def __aenter__(self):
await self.start()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.stop()
async def start(self) -> None:
"""Coroutine to run this server."""
if self._is_running:
raise SublemonRuntimeError(
'Attempted to start an already-running `Sublemon` instance')
self._poll_task = asyncio.ensure_future(self._poll())
self._is_running = True
async def stop(self) -> None:
"""Coroutine to stop execution of this server."""
if not self._is_running:
raise SublemonRuntimeError(
'Attempted to stop an already-stopped `Sublemon` instance')
await self.block()
self._poll_task.cancel()
self._is_running = False
with suppress(asyncio.CancelledError):
await self._poll_task
async def _poll(self) -> None:
"""Coroutine to poll status of running subprocesses."""
while True:
await asyncio.sleep(self._poll_delta)
for subproc in list(self._running_set):
subproc._poll()
async def iter_lines(
self,
*cmds: str,
stream: str='both') -> AsyncGenerator[str, None]:
"""Coroutine to spawn commands and yield text lines from stdout."""
sps = self.spawn(*cmds)
if stream == 'both':
agen = amerge(
amerge(*[sp.stdout for sp in sps]),
amerge(*[sp.stderr for sp in sps]))
elif stream == 'stdout':
agen = amerge(*[sp.stdout for sp in sps])
elif stream == 'stderr':
agen = amerge(*[sp.stderr for sp in sps])
else:
raise SublemonRuntimeError(
'Invalid `stream` kwarg received: `' + str(stream) + '`')
async for line in agen:
yield line.decode('utf-8').rstrip()
async def gather(self, *cmds: str) -> Tuple[int]:
"""Coroutine to spawn subprocesses and block until completion.
Note:
The same `max_concurrency` restriction that applies to `spawn`
also applies here.
Returns:
The exit codes of the spawned subprocesses, in the order they were
passed.
"""
subprocs = self.spawn(*cmds)
subproc_wait_coros = [subproc.wait_done() for subproc in subprocs]
return await asyncio.gather(*subproc_wait_coros) # type: ignore
def spawn(self, *cmds: str) -> List[SublemonSubprocess]:
"""Coroutine to spawn shell commands.
If `max_concurrency` is reached during the attempt to spawn the
specified subprocesses, excess subprocesses will block while attempting
to acquire this server's semaphore.
"""
if not self._is_running:
raise SublemonRuntimeError(
'Attempted to spawn subprocesses from a non-started server')
subprocs = [SublemonSubprocess(self, cmd) for cmd in cmds]
for sp in subprocs:
asyncio.ensure_future(sp.spawn())
return subprocs
@property
def running_subprocesses(self) -> Set[SublemonSubprocess]:
"""Get the currently-executing subprocesses."""
return self._running_set
@property
def pending_subprocesses(self) -> Set[SublemonSubprocess]:
"""Get the subprocesses waiting to begin execution."""
return self._pending_set
@property
def max_concurrency(self) -> int:
"""The max number of subprocesses that can be running concurrently."""
return self._max_concurrency
@property
def poll_delta(self) -> float:
"""The number of seconds to sleep in between polls of subprocesses."""
return self._poll_delta
|
welchbj/sublemon
|
sublemon/runtime.py
|
Sublemon.spawn
|
python
|
def spawn(self, *cmds: str) -> List[SublemonSubprocess]:
if not self._is_running:
raise SublemonRuntimeError(
'Attempted to spawn subprocesses from a non-started server')
subprocs = [SublemonSubprocess(self, cmd) for cmd in cmds]
for sp in subprocs:
asyncio.ensure_future(sp.spawn())
return subprocs
|
Coroutine to spawn shell commands.
If `max_concurrency` is reached during the attempt to spawn the
specified subprocesses, excess subprocesses will block while attempting
to acquire this server's semaphore.
|
train
|
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/runtime.py#L123-L138
| null |
class Sublemon:
"""The runtime for spawning subprocesses."""
def __init__(self, max_concurrency: int=_DEFAULT_MC,
poll_delta: float=_DEFAULT_PD) -> None:
self._max_concurrency = max_concurrency
self._poll_delta = poll_delta
self._sem = asyncio.BoundedSemaphore(max_concurrency)
self._is_running = False
self._pending_set: Set[SublemonSubprocess] = set()
self._running_set: Set[SublemonSubprocess] = set()
def __str__(self):
return ('max concurrency: {}, poll delta: {}, {} running and {} '
'pending subprocesses').format(
self._max_concurrency,
self._poll_delta,
len(self._running_set),
len(self._pending_set))
def __repr__(self):
return '<Sublemon [{}]>'.format(str(self))
async def __aenter__(self):
await self.start()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.stop()
async def start(self) -> None:
"""Coroutine to run this server."""
if self._is_running:
raise SublemonRuntimeError(
'Attempted to start an already-running `Sublemon` instance')
self._poll_task = asyncio.ensure_future(self._poll())
self._is_running = True
async def stop(self) -> None:
"""Coroutine to stop execution of this server."""
if not self._is_running:
raise SublemonRuntimeError(
'Attempted to stop an already-stopped `Sublemon` instance')
await self.block()
self._poll_task.cancel()
self._is_running = False
with suppress(asyncio.CancelledError):
await self._poll_task
async def _poll(self) -> None:
"""Coroutine to poll status of running subprocesses."""
while True:
await asyncio.sleep(self._poll_delta)
for subproc in list(self._running_set):
subproc._poll()
async def iter_lines(
self,
*cmds: str,
stream: str='both') -> AsyncGenerator[str, None]:
"""Coroutine to spawn commands and yield text lines from stdout."""
sps = self.spawn(*cmds)
if stream == 'both':
agen = amerge(
amerge(*[sp.stdout for sp in sps]),
amerge(*[sp.stderr for sp in sps]))
elif stream == 'stdout':
agen = amerge(*[sp.stdout for sp in sps])
elif stream == 'stderr':
agen = amerge(*[sp.stderr for sp in sps])
else:
raise SublemonRuntimeError(
'Invalid `stream` kwarg received: `' + str(stream) + '`')
async for line in agen:
yield line.decode('utf-8').rstrip()
async def gather(self, *cmds: str) -> Tuple[int]:
"""Coroutine to spawn subprocesses and block until completion.
Note:
The same `max_concurrency` restriction that applies to `spawn`
also applies here.
Returns:
The exit codes of the spawned subprocesses, in the order they were
passed.
"""
subprocs = self.spawn(*cmds)
subproc_wait_coros = [subproc.wait_done() for subproc in subprocs]
return await asyncio.gather(*subproc_wait_coros) # type: ignore
async def block(self) -> None:
"""Block until all running and pending subprocesses have finished."""
await asyncio.gather(
*itertools.chain(
(sp.wait_done() for sp in self._running_set),
(sp.wait_done() for sp in self._pending_set)))
@property
def running_subprocesses(self) -> Set[SublemonSubprocess]:
"""Get the currently-executing subprocesses."""
return self._running_set
@property
def pending_subprocesses(self) -> Set[SublemonSubprocess]:
"""Get the subprocesses waiting to begin execution."""
return self._pending_set
@property
def max_concurrency(self) -> int:
"""The max number of subprocesses that can be running concurrently."""
return self._max_concurrency
@property
def poll_delta(self) -> float:
"""The number of seconds to sleep in between polls of subprocesses."""
return self._poll_delta
|
ilgarm/pyzimbra
|
pyzimbra/auth.py
|
Authenticator.authenticate
|
python
|
def authenticate(self, transport, account_name, password):
if not isinstance(transport, ZimbraClientTransport):
raise ZimbraClientException('Invalid transport')
if util.empty(account_name):
raise AuthException('Empty account name')
|
Authenticates account, if no password given tries to pre-authenticate.
@param transport: transport to use for method calls
@param account_name: account name
@param password: account password
@return: AuthToken if authentication succeeded
@raise AuthException: if authentication fails
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/auth.py#L100-L113
|
[
"def empty(val):\n \"\"\"\n Checks if value is empty.\n All unknown data types considered as empty values.\n @return: bool\n \"\"\"\n if val == None:\n return True\n\n if isinstance(val,str) and len(val) > 0:\n return False\n\n return True\n"
] |
class Authenticator(object):
"""
Authenticator provides methods to authenticate using username/password
as a user or administrator or using domain key.
"""
__metaclass__ = abc.ABCMeta
# --------------------------------------------------------------- properties
domains = property(lambda self: self._domains,
lambda self, v: setattr(self, '_domains', v))
# -------------------------------------------------------------------- bound
def __init__(self):
self.domains = {}
# ------------------------------------------------------------------ unbound
@abc.abstractmethod
def authenticate_admin(self, transport, account_name, password):
"""
Authenticates administrator using username and password.
@param transport: transport to use for method calls
@param account_name: account name
@param password: account password
@return: AuthToken if authentication succeeded
@raise AuthException: if authentication fails
"""
@abc.abstractmethod
|
ilgarm/pyzimbra
|
pyzimbra/zclient.py
|
ZimbraSoapClient.invoke
|
python
|
def invoke(self, ns, request_name, params={}, simplify=False):
if self.auth_token == None:
raise AuthException('Unable to invoke zimbra method')
if util.empty(request_name):
raise ZimbraClientException('Invalid request')
return self.transport.invoke(ns,
request_name,
params,
self.auth_token,
simplify)
|
Invokes zimbra method using established authentication session.
@param req: zimbra request
@parm params: request params
@param simplify: True to return python object, False to return xml struct
@return: zimbra response
@raise AuthException: if authentication fails
@raise SoapException: wrapped server exception
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/zclient.py#L67-L87
|
[
"def empty(val):\n \"\"\"\n Checks if value is empty.\n All unknown data types considered as empty values.\n @return: bool\n \"\"\"\n if val == None:\n return True\n\n if isinstance(val,str) and len(val) > 0:\n return False\n\n return True\n",
"def invoke(self, ns, request_name, params, auth_token, simplify=False):\n \"\"\"\n Invokes zimbra soap request.\n \"\"\"\n ZimbraClientTransport.invoke(self,\n ns,\n request_name,\n params,\n auth_token,\n simplify)\n\n headers = SOAPpy.Types.headerType()\n\n if auth_token.token != None:\n data={sconstant.E_AUTH_TOKEN: auth_token.token,\n sconstant.E_SESSION_ID: auth_token.session_id}\n context = SOAPpy.Types.structType(data=data, name=sconstant.CONTEXT)\n context._validURIs = []\n context._ns = (zconstant.SOAP_DEFAULT_PREFIX, zconstant.NS_ZIMBRA_URL)\n headers.context = context\n\n proxy = SOAPpy.SOAPProxy(self.soap_url,\n ns,\n header=headers,\n noroot=1,\n simplify_objects=simplify)\n proxy.config.debug = self.log.isEnabledFor(logging.DEBUG)\n proxy.config.strictNamespaces = 0\n proxy.config.buildWithNamespacePrefix = 0\n proxy.transport = self.http_transport\n\n _parseSOAP = SOAPpy.Parser._parseSOAP\n SOAPpy.Parser._parseSOAP = parseSOAP\n try:\n m = proxy.__getattr__(request_name)\n return m.__call__(**params)\n finally:\n SOAPpy.Parser._parseSOAP = _parseSOAP\n"
] |
class ZimbraSoapClient(object):
"""
Zimbra client main class.
"""
__metaclass__ = abc.ABCMeta
# --------------------------------------------------------------- properties
transport = property(lambda self: self._transport,
lambda self, v: setattr(self, '_transport', v))
authenticator = property(lambda self: self._authenticator,
lambda self, v: setattr(self, '_authenticator', v))
auth_token = property(lambda self: self._auth_token,
lambda self, v: setattr(self, '_auth_token', v))
# -------------------------------------------------------------------- bound
def __init__(self, soap_url, domains={}, proxy_url=None):
self.transport = SoapTransport()
self.transport.soap_url = soap_url
if proxy_url != None:
self.transport.proxy_url = proxy_url
self.authenticator = SoapAuthenticator()
self.authenticator.domains = domains
self.auth_token = None
# ------------------------------------------------------------------ unbound
@abc.abstractmethod
def authenticate(self, account_name, password):
"""
Authenticates zimbra account.
@param account_name: account email address
@param password: account password
@raise AuthException: if authentication fails
@raise SoapException: if soap communication fails
"""
|
ilgarm/pyzimbra
|
pyzimbra/z/admin.py
|
ZimbraAdmin.get_info
|
python
|
def get_info(self, account, params={}):
res = self.invoke(zconstant.NS_ZIMBRA_ADMIN_URL,
sconstant.GetInfoRequest,
params)
return res
|
Gets account info.
@param account: account to get info for
@param params: parameters to retrieve
@return: AccountInfo
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/z/admin.py#L66-L77
|
[
"def invoke(self, ns, request_name, params={}, simplify=False):\n \"\"\"\n Invokes zimbra method using established authentication session.\n @param req: zimbra request\n @parm params: request params\n @param simplify: True to return python object, False to return xml struct\n @return: zimbra response\n @raise AuthException: if authentication fails\n @raise SoapException: wrapped server exception\n \"\"\"\n if self.auth_token == None:\n raise AuthException('Unable to invoke zimbra method')\n\n if util.empty(request_name):\n raise ZimbraClientException('Invalid request')\n\n return self.transport.invoke(ns,\n request_name,\n params,\n self.auth_token,\n simplify)\n"
] |
class ZimbraAdmin(ZimbraSoapClient):
"""
Zimbra non-privileged client.
"""
# ------------------------------------------------------------------ unbound
def authenticate(self, account_name, password):
"""
Authenticates zimbra account.
@param account_name: account email address
@param password: account password
@raise AuthException: if authentication fails
@raise SoapException: if soap communication fails
"""
self.auth_token = self.authenticator.authenticate_admin(self.transport,
account_name,
password)
def get_account(self):
"""
Gets account.
@return: Account
"""
def change_password(self, account, password):
"""
Changes account password.
@param account: account to change password for
@param password: new password
"""
|
ilgarm/pyzimbra
|
pyzimbra/util.py
|
empty
|
python
|
def empty(val):
if val == None:
return True
if isinstance(val,str) and len(val) > 0:
return False
return True
|
Checks if value is empty.
All unknown data types considered as empty values.
@return: bool
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/util.py#L30-L42
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# Copyright (c) 2010, Ilgar Mashayev
#
# E-mail: pyzimbra@lab.az
# Website: http://github.com/ilgarm/pyzimbra
################################################################################
# This file is part of pyzimbra.
#
# Pyzimbra is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyzimbra is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyzimbra. If not, see <http://www.gnu.org/licenses/>.
################################################################################
@author: ilgar
"""
import re
def get_domain(email):
"""
Returns domain part of the email or None if invalid email format.
@param email: email
@return: str
"""
match = re.search('^[^@]*?@([^@]+?)$', email)
if match == None:
return None
return match.group(1)
|
ilgarm/pyzimbra
|
pyzimbra/soap_soappy.py
|
parseSOAP
|
python
|
def parseSOAP(xml_str, rules = None):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
parser = xml.sax.make_parser()
t = ZimbraSOAPParser(rules = rules)
parser.setContentHandler(t)
e = xml.sax.handler.ErrorHandler()
parser.setErrorHandler(e)
inpsrc = xml.sax.xmlreader.InputSource()
inpsrc.setByteStream(StringIO(xml_str))
# turn on namespace mangeling
parser.setFeature(xml.sax.handler.feature_namespaces,1)
try:
parser.parse(inpsrc)
except xml.sax.SAXParseException, e:
parser._parser = None
raise e
return t
|
Replacement for SOAPpy._parseSOAP method to spoof SOAPParser.
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/soap_soappy.py#L54-L81
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# Copyright (c) 2010, Ilgar Mashayev
#
# E-mail: pyzimbra@lab.az
# Website: http://github.com/ilgarm/pyzimbra
################################################################################
# This file is part of pyzimbra.
#
# Pyzimbra is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyzimbra is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyzimbra. If not, see <http://www.gnu.org/licenses/>.
################################################################################
SOAPPy related methods and classes.
@author: ilgar
"""
from pyzimbra import zconstant, util
from pyzimbra.soap import SoapException
import SOAPpy
import logging
import urllib2
import xml.sax
class ZimbraSOAPParser(SOAPpy.SOAPParser):
"""
No need to keep track of hrefs for zimbra.
Ugliest hack ever: just empty list of ref ids every time.
Could not find another workaround.
"""
# -------------------------------------------------------------------- bound
def __init__(self, rules = None):
SOAPpy.SOAPParser.__init__(self, rules)
# ------------------------------------------------------------------ unbound
def endElementNS(self, name, qname):
self._ids = {}
SOAPpy.SOAPParser.endElementNS(self, name, qname)
class SoapHttpTransport(SOAPpy.Client.HTTPTransport):
    """
    Http transport using urllib2, with support for proxy authentication and more.
    """
    # --------------------------------------------------------------- properties
    transport = property(lambda self: self._transport,
                         lambda self, v: setattr(self, '_transport', v))

    # -------------------------------------------------------------------- bound
    def __init__(self):
        self.log = logging.getLogger(__name__)

    # ------------------------------------------------------------------ unbound
    def call(self, addr, data, namespace, soapaction = None, encoding = None,
             http_proxy = None, config = SOAPpy.Config):
        """
        Performs the HTTP request carrying a SOAP envelope.

        @param addr: target address (str or SOAPpy.Client.SOAPAddress)
        @param data: serialized SOAP request body
        @param namespace: request namespace, used to derive the response namespace
        @return: (response data, new namespace) tuple
        @raise SoapException: if the HTTP request fails
        """
        if not isinstance(addr, SOAPpy.Client.SOAPAddress):
            addr = SOAPpy.Client.SOAPAddress(addr, config)

        url = addr.proto + "://" + addr.host + addr.path
        headers = {'User-Agent': zconstant.USER_AGENT}
        request = urllib2.Request(url, data, headers)

        self.log.debug('Request url: %s' % url)
        self.log.debug('Request headers')
        self.log.debug(request.headers)
        self.log.debug('Request data')
        self.log.debug(data)

        try:
            opener = self.build_opener()
            response = opener.open(request)
            data = response.read()

            self.log.debug('Response headers')
            self.log.debug(response.headers)
            self.log.debug('Response data')
            self.log.debug(data)
        except urllib2.URLError as exc:
            # Wrap transport-level failures into a SoapException chain.
            raise self.init_soap_exception(exc)

        # get the new namespace
        if namespace is None:
            new_ns = None
        else:
            new_ns = self.getNS(namespace, data)

        return data, new_ns

    def build_opener(self):
        """
        Builds url opener, initializing proxy.
        @return: OpenerDirector
        """
        http_handler = urllib2.HTTPHandler() # debuglevel=self.transport.debug

        if util.empty(self.transport.proxy_url):
            return urllib2.build_opener(http_handler)

        # Derive the scheme from the proxy url itself. The previous
        # proxy_url[:4] slice mapped an 'https://...' proxy to the 'http'
        # key, silently breaking https proxies.
        scheme = self.transport.proxy_url.split('://', 1)[0]
        proxy_handler = urllib2.ProxyHandler({scheme: self.transport.proxy_url})
        return urllib2.build_opener(http_handler, proxy_handler)

    def init_soap_exception(self, exc):
        """
        Initializes exception based on soap error response.
        @param exc: URLError
        @return: SoapException
        """
        # Non-HTTP errors (DNS failure, refused connection, ...) carry no
        # SOAP fault body to parse.
        if not isinstance(exc, urllib2.HTTPError):
            return SoapException(unicode(exc), exc)

        try:
            data = exc.read()
            self.log.debug(data)
            t = SOAPpy.Parser.parseSOAP(data)
            message = '%s:%s' % (t.Fault.faultcode, t.Fault.faultstring)
            e = SoapException(message, exc)
            e.code = t.Fault.detail.Error.Code
            e.trace = t.Fault.detail.Error.Trace
            return e
        except Exception:
            # Best-effort fault parsing: fall back to the raw HTTP error if
            # the body is not a well-formed SOAP fault. Narrowed from a bare
            # except so KeyboardInterrupt/SystemExit still propagate.
            return SoapException(unicode(exc), exc)
|
ilgarm/pyzimbra
|
pyzimbra/soap_soappy.py
|
SoapHttpTransport.build_opener
|
python
|
def build_opener(self):
http_handler = urllib2.HTTPHandler() # debuglevel=self.transport.debug
if util.empty(self.transport.proxy_url):
return urllib2.build_opener(http_handler)
proxy_handler = urllib2.ProxyHandler(
{self.transport.proxy_url[:4]: self.transport.proxy_url})
return urllib2.build_opener(http_handler, proxy_handler)
|
Builds url opener, initializing proxy.
@return: OpenerDirector
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/soap_soappy.py#L138-L151
|
[
"def empty(val):\n \"\"\"\n Checks if value is empty.\n All unknown data types considered as empty values.\n @return: bool\n \"\"\"\n if val == None:\n return True\n\n if isinstance(val,str) and len(val) > 0:\n return False\n\n return True\n"
] |
class SoapHttpTransport(SOAPpy.Client.HTTPTransport):
"""
Http transport using urllib2, with support for proxy authentication and more.
"""
# --------------------------------------------------------------- properties
transport = property(lambda self: self._transport,
lambda self, v: setattr(self, '_transport', v))
# -------------------------------------------------------------------- bound
def __init__(self):
self.log = logging.getLogger(__name__)
# ------------------------------------------------------------------ unbound
def call(self, addr, data, namespace, soapaction = None, encoding = None,
http_proxy = None, config = SOAPpy.Config):
if not isinstance(addr, SOAPpy.Client.SOAPAddress):
addr = SOAPpy.Client.SOAPAddress(addr, config)
url = addr.proto + "://" + addr.host + addr.path
headers = {'User-Agent': zconstant.USER_AGENT}
request = urllib2.Request(url, data, headers)
self.log.debug('Request url: %s' % url)
self.log.debug('Request headers')
self.log.debug(request.headers)
self.log.debug('Request data')
self.log.debug(data)
try:
opener = self.build_opener()
response = opener.open(request)
data = response.read()
self.log.debug('Response headers')
self.log.debug(response.headers)
self.log.debug('Response data')
self.log.debug(data)
except urllib2.URLError as exc:
raise self.init_soap_exception(exc)
# get the new namespace
if namespace is None:
new_ns = None
else:
new_ns = self.getNS(namespace, data)
return data, new_ns
def init_soap_exception(self, exc):
"""
Initializes exception based on soap error response.
@param exc: URLError
@return: SoapException
"""
if not isinstance(exc, urllib2.HTTPError):
return SoapException(unicode(exc), exc)
if isinstance(exc, urllib2.HTTPError):
try:
data = exc.read()
self.log.debug(data)
t = SOAPpy.Parser.parseSOAP(data)
message = '%s:%s' % (t.Fault.faultcode, t.Fault.faultstring)
e = SoapException(message, exc)
e.code = t.Fault.detail.Error.Code
e.trace = t.Fault.detail.Error.Trace
return e
except:
return SoapException(unicode(exc), exc)
return SoapException(exc.reason, exc)
|
ilgarm/pyzimbra
|
pyzimbra/soap_soappy.py
|
SoapHttpTransport.init_soap_exception
|
python
|
def init_soap_exception(self, exc):
if not isinstance(exc, urllib2.HTTPError):
return SoapException(unicode(exc), exc)
if isinstance(exc, urllib2.HTTPError):
try:
data = exc.read()
self.log.debug(data)
t = SOAPpy.Parser.parseSOAP(data)
message = '%s:%s' % (t.Fault.faultcode, t.Fault.faultstring)
e = SoapException(message, exc)
e.code = t.Fault.detail.Error.Code
e.trace = t.Fault.detail.Error.Trace
return e
except:
return SoapException(unicode(exc), exc)
return SoapException(exc.reason, exc)
|
Initializes exception based on soap error response.
@param exc: URLError
@return: SoapException
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/soap_soappy.py#L154-L177
| null |
class SoapHttpTransport(SOAPpy.Client.HTTPTransport):
"""
Http transport using urllib2, with support for proxy authentication and more.
"""
# --------------------------------------------------------------- properties
transport = property(lambda self: self._transport,
lambda self, v: setattr(self, '_transport', v))
# -------------------------------------------------------------------- bound
def __init__(self):
self.log = logging.getLogger(__name__)
# ------------------------------------------------------------------ unbound
def call(self, addr, data, namespace, soapaction = None, encoding = None,
http_proxy = None, config = SOAPpy.Config):
if not isinstance(addr, SOAPpy.Client.SOAPAddress):
addr = SOAPpy.Client.SOAPAddress(addr, config)
url = addr.proto + "://" + addr.host + addr.path
headers = {'User-Agent': zconstant.USER_AGENT}
request = urllib2.Request(url, data, headers)
self.log.debug('Request url: %s' % url)
self.log.debug('Request headers')
self.log.debug(request.headers)
self.log.debug('Request data')
self.log.debug(data)
try:
opener = self.build_opener()
response = opener.open(request)
data = response.read()
self.log.debug('Response headers')
self.log.debug(response.headers)
self.log.debug('Response data')
self.log.debug(data)
except urllib2.URLError as exc:
raise self.init_soap_exception(exc)
# get the new namespace
if namespace is None:
new_ns = None
else:
new_ns = self.getNS(namespace, data)
return data, new_ns
def build_opener(self):
"""
Builds url opener, initializing proxy.
@return: OpenerDirector
"""
http_handler = urllib2.HTTPHandler() # debuglevel=self.transport.debug
if util.empty(self.transport.proxy_url):
return urllib2.build_opener(http_handler)
proxy_handler = urllib2.ProxyHandler(
{self.transport.proxy_url[:4]: self.transport.proxy_url})
return urllib2.build_opener(http_handler, proxy_handler)
|
ilgarm/pyzimbra
|
pyzimbra/soap_auth.py
|
SoapAuthenticator.authenticate_admin
|
python
|
def authenticate_admin(self, transport, account_name, password):
Authenticator.authenticate_admin(self, transport, account_name, password)
auth_token = AuthToken()
auth_token.account_name = account_name
params = {sconstant.E_NAME: account_name,
sconstant.E_PASSWORD: password}
self.log.debug('Authenticating admin %s' % account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ADMIN_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
auth_token.session_id = res.sessionId
self.log.info('Authenticated admin %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
|
Authenticates administrator using username and password.
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/soap_auth.py#L50-L77
|
[
"def authenticate_admin(self, transport, account_name, password):\n \"\"\"\n Authenticates administrator using username and password.\n @param transport: transport to use for method calls\n @param account_name: account name\n @param password: account password\n @return: AuthToken if authentication succeeded\n @raise AuthException: if authentication fails\n \"\"\"\n",
"def invoke(self, ns, request_name, params, auth_token, simplify=False):\n \"\"\"\n Invokes zimbra soap request.\n \"\"\"\n ZimbraClientTransport.invoke(self,\n ns,\n request_name,\n params,\n auth_token,\n simplify)\n\n headers = SOAPpy.Types.headerType()\n\n if auth_token.token != None:\n data={sconstant.E_AUTH_TOKEN: auth_token.token,\n sconstant.E_SESSION_ID: auth_token.session_id}\n context = SOAPpy.Types.structType(data=data, name=sconstant.CONTEXT)\n context._validURIs = []\n context._ns = (zconstant.SOAP_DEFAULT_PREFIX, zconstant.NS_ZIMBRA_URL)\n headers.context = context\n\n proxy = SOAPpy.SOAPProxy(self.soap_url,\n ns,\n header=headers,\n noroot=1,\n simplify_objects=simplify)\n proxy.config.debug = self.log.isEnabledFor(logging.DEBUG)\n proxy.config.strictNamespaces = 0\n proxy.config.buildWithNamespacePrefix = 0\n proxy.transport = self.http_transport\n\n _parseSOAP = SOAPpy.Parser._parseSOAP\n SOAPpy.Parser._parseSOAP = parseSOAP\n try:\n m = proxy.__getattr__(request_name)\n return m.__call__(**params)\n finally:\n SOAPpy.Parser._parseSOAP = _parseSOAP\n"
] |
class SoapAuthenticator(Authenticator):
"""
Soap authenticator.
"""
# --------------------------------------------------------------- properties
# -------------------------------------------------------------------- bound
def __init__(self):
Authenticator.__init__(self)
self.log = logging.getLogger(__name__)
# ------------------------------------------------------------------ unbound
def authenticate(self, transport, account_name, password=None):
"""
Authenticates account using soap method.
"""
Authenticator.authenticate(self, transport, account_name, password)
if password == None:
return self.pre_auth(transport, account_name)
else:
return self.auth(transport, account_name, password)
def auth(self, transport, account_name, password):
"""
Authenticates using username and password.
"""
auth_token = AuthToken()
auth_token.account_name = account_name
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=account_name, attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_PASSWORD: password}
self.log.debug('Authenticating account %s' % account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
if hasattr(res, 'sessionId'):
auth_token.session_id = res.sessionId
self.log.info('Authenticated account %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
def pre_auth(self, transport, account_name):
"""
Authenticates using username and domain key.
"""
auth_token = AuthToken()
auth_token.account_name = account_name
domain = util.get_domain(account_name)
if domain == None:
raise AuthException('Invalid auth token account')
if domain in self.domains:
domain_key = self.domains[domain]
else:
domain_key = None
if domain_key == None:
raise AuthException('Invalid domain key for domain %s' % domain)
self.log.debug('Initialized domain key for account %s'
% account_name)
expires = 0
timestamp = int(time() * 1000)
pak = hmac.new(domain_key, '%s|%s|%s|%s' %
(account_name, sconstant.E_NAME, expires, timestamp),
hashlib.sha1).hexdigest()
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=account_name, attrs=attrs)
attrs = {sconstant.A_TIMESTAMP: timestamp, sconstant.A_EXPIRES: expires}
preauth = SOAPpy.Types.stringType(data=pak,
name=sconstant.E_PREAUTH,
attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_PREAUTH: preauth}
self.log.debug('Authenticating account %s using domain key'
% account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
auth_token.session_id = res.sessionId
self.log.info('Authenticated account %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
|
ilgarm/pyzimbra
|
pyzimbra/soap_auth.py
|
SoapAuthenticator.authenticate
|
python
|
def authenticate(self, transport, account_name, password=None):
Authenticator.authenticate(self, transport, account_name, password)
if password == None:
return self.pre_auth(transport, account_name)
else:
return self.auth(transport, account_name, password)
|
Authenticates account using soap method.
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/soap_auth.py#L80-L89
|
[
"def auth(self, transport, account_name, password):\n if not account_name == self.account_name:\n raise AuthException('Invalid username')\n\n if not password == self.password:\n raise AuthException('Invalid password')\n\n token = AuthToken()\n token.account_name = self.account_name\n token.token = self.token\n token.session_id = self.session_id\n\n return token\n",
"def pre_auth(self, transport, account_name):\n if not account_name == self.account_name:\n raise AuthException('Invalid username')\n\n token = AuthToken()\n token.account_name = self.account_name\n token.token = self.token\n token.session_id = self.session_id\n\n return token\n",
"def authenticate(self, transport, account_name, password):\n \"\"\"\n Authenticates account, if no password given tries to pre-authenticate.\n @param transport: transport to use for method calls\n @param account_name: account name\n @param password: account password\n @return: AuthToken if authentication succeeded\n @raise AuthException: if authentication fails\n \"\"\"\n if not isinstance(transport, ZimbraClientTransport):\n raise ZimbraClientException('Invalid transport')\n\n if util.empty(account_name):\n raise AuthException('Empty account name')\n",
"def auth(self, transport, account_name, password):\n \"\"\"\n Authenticates using username and password.\n \"\"\"\n auth_token = AuthToken()\n auth_token.account_name = account_name\n\n attrs = {sconstant.A_BY: sconstant.V_NAME}\n account = SOAPpy.Types.stringType(data=account_name, attrs=attrs)\n\n params = {sconstant.E_ACCOUNT: account,\n sconstant.E_PASSWORD: password}\n\n self.log.debug('Authenticating account %s' % account_name)\n try:\n res = transport.invoke(zconstant.NS_ZIMBRA_ACC_URL,\n sconstant.AuthRequest,\n params,\n auth_token)\n except SoapException as exc:\n raise AuthException(unicode(exc), exc)\n\n auth_token.token = res.authToken\n\n if hasattr(res, 'sessionId'):\n auth_token.session_id = res.sessionId\n\n self.log.info('Authenticated account %s, session id %s'\n % (account_name, auth_token.session_id))\n\n return auth_token\n",
"def pre_auth(self, transport, account_name):\n \"\"\"\n Authenticates using username and domain key.\n \"\"\"\n auth_token = AuthToken()\n auth_token.account_name = account_name\n\n domain = util.get_domain(account_name)\n if domain == None:\n raise AuthException('Invalid auth token account')\n\n if domain in self.domains:\n domain_key = self.domains[domain]\n else:\n domain_key = None\n\n if domain_key == None:\n raise AuthException('Invalid domain key for domain %s' % domain)\n\n self.log.debug('Initialized domain key for account %s'\n % account_name)\n\n expires = 0\n timestamp = int(time() * 1000)\n pak = hmac.new(domain_key, '%s|%s|%s|%s' %\n (account_name, sconstant.E_NAME, expires, timestamp),\n hashlib.sha1).hexdigest()\n\n attrs = {sconstant.A_BY: sconstant.V_NAME}\n account = SOAPpy.Types.stringType(data=account_name, attrs=attrs)\n\n attrs = {sconstant.A_TIMESTAMP: timestamp, sconstant.A_EXPIRES: expires}\n preauth = SOAPpy.Types.stringType(data=pak,\n name=sconstant.E_PREAUTH,\n attrs=attrs)\n\n params = {sconstant.E_ACCOUNT: account,\n sconstant.E_PREAUTH: preauth}\n\n self.log.debug('Authenticating account %s using domain key'\n % account_name)\n try:\n res = transport.invoke(zconstant.NS_ZIMBRA_ACC_URL,\n sconstant.AuthRequest,\n params,\n auth_token)\n except SoapException as exc:\n raise AuthException(unicode(exc), exc)\n\n auth_token.token = res.authToken\n auth_token.session_id = res.sessionId\n\n self.log.info('Authenticated account %s, session id %s'\n % (account_name, auth_token.session_id))\n\n return auth_token\n"
] |
class SoapAuthenticator(Authenticator):
"""
Soap authenticator.
"""
# --------------------------------------------------------------- properties
# -------------------------------------------------------------------- bound
def __init__(self):
Authenticator.__init__(self)
self.log = logging.getLogger(__name__)
# ------------------------------------------------------------------ unbound
def authenticate_admin(self, transport, account_name, password):
"""
Authenticates administrator using username and password.
"""
Authenticator.authenticate_admin(self, transport, account_name, password)
auth_token = AuthToken()
auth_token.account_name = account_name
params = {sconstant.E_NAME: account_name,
sconstant.E_PASSWORD: password}
self.log.debug('Authenticating admin %s' % account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ADMIN_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
auth_token.session_id = res.sessionId
self.log.info('Authenticated admin %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
def auth(self, transport, account_name, password):
"""
Authenticates using username and password.
"""
auth_token = AuthToken()
auth_token.account_name = account_name
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=account_name, attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_PASSWORD: password}
self.log.debug('Authenticating account %s' % account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
if hasattr(res, 'sessionId'):
auth_token.session_id = res.sessionId
self.log.info('Authenticated account %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
def pre_auth(self, transport, account_name):
"""
Authenticates using username and domain key.
"""
auth_token = AuthToken()
auth_token.account_name = account_name
domain = util.get_domain(account_name)
if domain == None:
raise AuthException('Invalid auth token account')
if domain in self.domains:
domain_key = self.domains[domain]
else:
domain_key = None
if domain_key == None:
raise AuthException('Invalid domain key for domain %s' % domain)
self.log.debug('Initialized domain key for account %s'
% account_name)
expires = 0
timestamp = int(time() * 1000)
pak = hmac.new(domain_key, '%s|%s|%s|%s' %
(account_name, sconstant.E_NAME, expires, timestamp),
hashlib.sha1).hexdigest()
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=account_name, attrs=attrs)
attrs = {sconstant.A_TIMESTAMP: timestamp, sconstant.A_EXPIRES: expires}
preauth = SOAPpy.Types.stringType(data=pak,
name=sconstant.E_PREAUTH,
attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_PREAUTH: preauth}
self.log.debug('Authenticating account %s using domain key'
% account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
auth_token.session_id = res.sessionId
self.log.info('Authenticated account %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
|
ilgarm/pyzimbra
|
pyzimbra/soap_auth.py
|
SoapAuthenticator.auth
|
python
|
def auth(self, transport, account_name, password):
auth_token = AuthToken()
auth_token.account_name = account_name
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=account_name, attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_PASSWORD: password}
self.log.debug('Authenticating account %s' % account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
if hasattr(res, 'sessionId'):
auth_token.session_id = res.sessionId
self.log.info('Authenticated account %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
|
Authenticates using username and password.
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/soap_auth.py#L92-L122
| null |
class SoapAuthenticator(Authenticator):
"""
Soap authenticator.
"""
# --------------------------------------------------------------- properties
# -------------------------------------------------------------------- bound
def __init__(self):
Authenticator.__init__(self)
self.log = logging.getLogger(__name__)
# ------------------------------------------------------------------ unbound
def authenticate_admin(self, transport, account_name, password):
"""
Authenticates administrator using username and password.
"""
Authenticator.authenticate_admin(self, transport, account_name, password)
auth_token = AuthToken()
auth_token.account_name = account_name
params = {sconstant.E_NAME: account_name,
sconstant.E_PASSWORD: password}
self.log.debug('Authenticating admin %s' % account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ADMIN_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
auth_token.session_id = res.sessionId
self.log.info('Authenticated admin %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
def authenticate(self, transport, account_name, password=None):
"""
Authenticates account using soap method.
"""
Authenticator.authenticate(self, transport, account_name, password)
if password == None:
return self.pre_auth(transport, account_name)
else:
return self.auth(transport, account_name, password)
def auth(self, transport, account_name, password):
"""
Authenticates using username and password.
"""
auth_token = AuthToken()
auth_token.account_name = account_name
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=account_name, attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_PASSWORD: password}
self.log.debug('Authenticating account %s' % account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
if hasattr(res, 'sessionId'):
auth_token.session_id = res.sessionId
self.log.info('Authenticated account %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
def pre_auth(self, transport, account_name):
"""
Authenticates using username and domain key.
"""
auth_token = AuthToken()
auth_token.account_name = account_name
domain = util.get_domain(account_name)
if domain == None:
raise AuthException('Invalid auth token account')
if domain in self.domains:
domain_key = self.domains[domain]
else:
domain_key = None
if domain_key == None:
raise AuthException('Invalid domain key for domain %s' % domain)
self.log.debug('Initialized domain key for account %s'
% account_name)
expires = 0
timestamp = int(time() * 1000)
pak = hmac.new(domain_key, '%s|%s|%s|%s' %
(account_name, sconstant.E_NAME, expires, timestamp),
hashlib.sha1).hexdigest()
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=account_name, attrs=attrs)
attrs = {sconstant.A_TIMESTAMP: timestamp, sconstant.A_EXPIRES: expires}
preauth = SOAPpy.Types.stringType(data=pak,
name=sconstant.E_PREAUTH,
attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_PREAUTH: preauth}
self.log.debug('Authenticating account %s using domain key'
% account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
auth_token.session_id = res.sessionId
self.log.info('Authenticated account %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
|
ilgarm/pyzimbra
|
pyzimbra/soap_auth.py
|
SoapAuthenticator.pre_auth
|
python
|
def pre_auth(self, transport, account_name):
auth_token = AuthToken()
auth_token.account_name = account_name
domain = util.get_domain(account_name)
if domain == None:
raise AuthException('Invalid auth token account')
if domain in self.domains:
domain_key = self.domains[domain]
else:
domain_key = None
if domain_key == None:
raise AuthException('Invalid domain key for domain %s' % domain)
self.log.debug('Initialized domain key for account %s'
% account_name)
expires = 0
timestamp = int(time() * 1000)
pak = hmac.new(domain_key, '%s|%s|%s|%s' %
(account_name, sconstant.E_NAME, expires, timestamp),
hashlib.sha1).hexdigest()
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=account_name, attrs=attrs)
attrs = {sconstant.A_TIMESTAMP: timestamp, sconstant.A_EXPIRES: expires}
preauth = SOAPpy.Types.stringType(data=pak,
name=sconstant.E_PREAUTH,
attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_PREAUTH: preauth}
self.log.debug('Authenticating account %s using domain key'
% account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
auth_token.session_id = res.sessionId
self.log.info('Authenticated account %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
|
Authenticates using username and domain key.
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/soap_auth.py#L125-L180
|
[
"def get_domain(email):\n \"\"\"\n Returns domain part of the email or None if invalid email format.\n @param email: email\n @return: str\n \"\"\"\n match = re.search('^[^@]*?@([^@]+?)$', email)\n\n if match == None:\n return None\n\n return match.group(1)\n"
] |
class SoapAuthenticator(Authenticator):
"""
Soap authenticator.
"""
# --------------------------------------------------------------- properties
# -------------------------------------------------------------------- bound
def __init__(self):
Authenticator.__init__(self)
self.log = logging.getLogger(__name__)
# ------------------------------------------------------------------ unbound
def authenticate_admin(self, transport, account_name, password):
"""
Authenticates administrator using username and password.
"""
Authenticator.authenticate_admin(self, transport, account_name, password)
auth_token = AuthToken()
auth_token.account_name = account_name
params = {sconstant.E_NAME: account_name,
sconstant.E_PASSWORD: password}
self.log.debug('Authenticating admin %s' % account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ADMIN_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
auth_token.session_id = res.sessionId
self.log.info('Authenticated admin %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
def authenticate(self, transport, account_name, password=None):
"""
Authenticates account using soap method.
"""
Authenticator.authenticate(self, transport, account_name, password)
if password == None:
return self.pre_auth(transport, account_name)
else:
return self.auth(transport, account_name, password)
def auth(self, transport, account_name, password):
"""
Authenticates using username and password.
"""
auth_token = AuthToken()
auth_token.account_name = account_name
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=account_name, attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_PASSWORD: password}
self.log.debug('Authenticating account %s' % account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
if hasattr(res, 'sessionId'):
auth_token.session_id = res.sessionId
self.log.info('Authenticated account %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
|
ilgarm/pyzimbra
|
pyzimbra/base.py
|
ZimbraClientException.print_trace
|
python
|
def print_trace(self):
traceback.print_exc()
for tb in self.tracebacks:
print tb,
print ''
|
Prints stack trace for current exceptions chain.
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/base.py#L82-L89
| null |
class ZimbraClientException(Exception):
"""
Zimbra client exception.
"""
# --------------------------------------------------------------- properties
message = property(lambda self: self._message,
lambda self, v: setattr(self, '_message', v))
tracebacks = property(lambda self: self._tracebacks,
lambda self, v: setattr(self, '_tracebacks', v))
# -------------------------------------------------------------------- bound
def __init__(self, message, cause = None):
Exception.__init__(self, message)
self._message = message
self.__cause__ = cause
self.tracebacks = []
if cause != None:
if isinstance(cause, ZimbraClientException):
self.tracebacks = cause.tracebacks
list = traceback.format_exc().split('\n')[1:]
self.tracebacks.insert(0, '\n'.join(list))
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return self.message
# ------------------------------------------------------------------ unbound
|
ilgarm/pyzimbra
|
pyzimbra/z/client.py
|
ZimbraClient.authenticate
|
python
|
def authenticate(self, account_name, password):
self.auth_token = self.authenticator.authenticate(self.transport,
account_name,
password)
|
Authenticates zimbra account.
@param account_name: account email address
@param password: account password
@raise AuthException: if authentication fails
@raise SoapException: if soap communication fails
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/z/client.py#L40-L50
|
[
"def authenticate(self, transport, account_name, password=None):\n \"\"\"\n Authenticates account using soap method.\n \"\"\"\n Authenticator.authenticate(self, transport, account_name, password)\n\n if password == None:\n return self.pre_auth(transport, account_name)\n else:\n return self.auth(transport, account_name, password)\n"
] |
class ZimbraClient(ZimbraSoapClient):
"""
Zimbra non-privileged client.
"""
# ------------------------------------------------------------------ unbound
def change_password(self, current_password, new_password):
"""
Changes account password.
@param current_password: current password
@param new_password: new password
"""
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=self.auth_token.account_name,
attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_OLD_PASSWORD: current_password,
sconstant.E_PASSWORD: new_password}
self.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.ChangePasswordRequest,
params)
def get_account_info(self):
"""
Gets account info.
@return: AccountInfo
"""
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=self.auth_token.account_name,
attrs=attrs)
params = {sconstant.E_ACCOUNT: account}
res = self.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.GetAccountInfoRequest,
params)
info = AccountInfo()
info.parse(res)
return info
def get_info(self, params={}):
"""
Gets mailbox info.
@param params: params to retrieve
@return: AccountInfo
"""
res = self.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.GetInfoRequest,
params)
return res
|
ilgarm/pyzimbra
|
pyzimbra/z/client.py
|
ZimbraClient.change_password
|
python
|
def change_password(self, current_password, new_password):
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=self.auth_token.account_name,
attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_OLD_PASSWORD: current_password,
sconstant.E_PASSWORD: new_password}
self.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.ChangePasswordRequest,
params)
|
Changes account password.
@param current_password: current password
@param new_password: new password
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/z/client.py#L53-L69
|
[
"def invoke(self, ns, request_name, params={}, simplify=False):\n \"\"\"\n Invokes zimbra method using established authentication session.\n @param req: zimbra request\n @parm params: request params\n @param simplify: True to return python object, False to return xml struct\n @return: zimbra response\n @raise AuthException: if authentication fails\n @raise SoapException: wrapped server exception\n \"\"\"\n if self.auth_token == None:\n raise AuthException('Unable to invoke zimbra method')\n\n if util.empty(request_name):\n raise ZimbraClientException('Invalid request')\n\n return self.transport.invoke(ns,\n request_name,\n params,\n self.auth_token,\n simplify)\n"
] |
class ZimbraClient(ZimbraSoapClient):
"""
Zimbra non-privileged client.
"""
# ------------------------------------------------------------------ unbound
def authenticate(self, account_name, password):
"""
Authenticates zimbra account.
@param account_name: account email address
@param password: account password
@raise AuthException: if authentication fails
@raise SoapException: if soap communication fails
"""
self.auth_token = self.authenticator.authenticate(self.transport,
account_name,
password)
def get_account_info(self):
"""
Gets account info.
@return: AccountInfo
"""
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=self.auth_token.account_name,
attrs=attrs)
params = {sconstant.E_ACCOUNT: account}
res = self.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.GetAccountInfoRequest,
params)
info = AccountInfo()
info.parse(res)
return info
def get_info(self, params={}):
"""
Gets mailbox info.
@param params: params to retrieve
@return: AccountInfo
"""
res = self.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.GetInfoRequest,
params)
return res
|
ilgarm/pyzimbra
|
pyzimbra/z/client.py
|
ZimbraClient.get_account_info
|
python
|
def get_account_info(self):
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=self.auth_token.account_name,
attrs=attrs)
params = {sconstant.E_ACCOUNT: account}
res = self.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.GetAccountInfoRequest,
params)
info = AccountInfo()
info.parse(res)
return info
|
Gets account info.
@return: AccountInfo
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/z/client.py#L72-L90
|
[
"def invoke(self, ns, request_name, params={}, simplify=False):\n \"\"\"\n Invokes zimbra method using established authentication session.\n @param req: zimbra request\n @parm params: request params\n @param simplify: True to return python object, False to return xml struct\n @return: zimbra response\n @raise AuthException: if authentication fails\n @raise SoapException: wrapped server exception\n \"\"\"\n if self.auth_token == None:\n raise AuthException('Unable to invoke zimbra method')\n\n if util.empty(request_name):\n raise ZimbraClientException('Invalid request')\n\n return self.transport.invoke(ns,\n request_name,\n params,\n self.auth_token,\n simplify)\n"
] |
class ZimbraClient(ZimbraSoapClient):
"""
Zimbra non-privileged client.
"""
# ------------------------------------------------------------------ unbound
def authenticate(self, account_name, password):
"""
Authenticates zimbra account.
@param account_name: account email address
@param password: account password
@raise AuthException: if authentication fails
@raise SoapException: if soap communication fails
"""
self.auth_token = self.authenticator.authenticate(self.transport,
account_name,
password)
def change_password(self, current_password, new_password):
"""
Changes account password.
@param current_password: current password
@param new_password: new password
"""
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=self.auth_token.account_name,
attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_OLD_PASSWORD: current_password,
sconstant.E_PASSWORD: new_password}
self.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.ChangePasswordRequest,
params)
def get_info(self, params={}):
"""
Gets mailbox info.
@param params: params to retrieve
@return: AccountInfo
"""
res = self.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.GetInfoRequest,
params)
return res
|
ilgarm/pyzimbra
|
pyzimbra/z/client.py
|
ZimbraClient.get_info
|
python
|
def get_info(self, params={}):
res = self.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.GetInfoRequest,
params)
return res
|
Gets mailbox info.
@param params: params to retrieve
@return: AccountInfo
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/z/client.py#L93-L103
|
[
"def invoke(self, ns, request_name, params={}, simplify=False):\n \"\"\"\n Invokes zimbra method using established authentication session.\n @param req: zimbra request\n @parm params: request params\n @param simplify: True to return python object, False to return xml struct\n @return: zimbra response\n @raise AuthException: if authentication fails\n @raise SoapException: wrapped server exception\n \"\"\"\n if self.auth_token == None:\n raise AuthException('Unable to invoke zimbra method')\n\n if util.empty(request_name):\n raise ZimbraClientException('Invalid request')\n\n return self.transport.invoke(ns,\n request_name,\n params,\n self.auth_token,\n simplify)\n"
] |
class ZimbraClient(ZimbraSoapClient):
"""
Zimbra non-privileged client.
"""
# ------------------------------------------------------------------ unbound
def authenticate(self, account_name, password):
"""
Authenticates zimbra account.
@param account_name: account email address
@param password: account password
@raise AuthException: if authentication fails
@raise SoapException: if soap communication fails
"""
self.auth_token = self.authenticator.authenticate(self.transport,
account_name,
password)
def change_password(self, current_password, new_password):
"""
Changes account password.
@param current_password: current password
@param new_password: new password
"""
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=self.auth_token.account_name,
attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_OLD_PASSWORD: current_password,
sconstant.E_PASSWORD: new_password}
self.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.ChangePasswordRequest,
params)
def get_account_info(self):
"""
Gets account info.
@return: AccountInfo
"""
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=self.auth_token.account_name,
attrs=attrs)
params = {sconstant.E_ACCOUNT: account}
res = self.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.GetAccountInfoRequest,
params)
info = AccountInfo()
info.parse(res)
return info
|
ilgarm/pyzimbra
|
pyzimbra/soap_transport.py
|
SoapTransport.invoke
|
python
|
def invoke(self, ns, request_name, params, auth_token, simplify=False):
ZimbraClientTransport.invoke(self,
ns,
request_name,
params,
auth_token,
simplify)
headers = SOAPpy.Types.headerType()
if auth_token.token != None:
data={sconstant.E_AUTH_TOKEN: auth_token.token,
sconstant.E_SESSION_ID: auth_token.session_id}
context = SOAPpy.Types.structType(data=data, name=sconstant.CONTEXT)
context._validURIs = []
context._ns = (zconstant.SOAP_DEFAULT_PREFIX, zconstant.NS_ZIMBRA_URL)
headers.context = context
proxy = SOAPpy.SOAPProxy(self.soap_url,
ns,
header=headers,
noroot=1,
simplify_objects=simplify)
proxy.config.debug = self.log.isEnabledFor(logging.DEBUG)
proxy.config.strictNamespaces = 0
proxy.config.buildWithNamespacePrefix = 0
proxy.transport = self.http_transport
_parseSOAP = SOAPpy.Parser._parseSOAP
SOAPpy.Parser._parseSOAP = parseSOAP
try:
m = proxy.__getattr__(request_name)
return m.__call__(**params)
finally:
SOAPpy.Parser._parseSOAP = _parseSOAP
|
Invokes zimbra soap request.
|
train
|
https://github.com/ilgarm/pyzimbra/blob/c397bc7497554d260ec6fd1a80405aed872a70cc/pyzimbra/soap_transport.py#L52-L89
|
[
"def invoke(self, ns, request_name, params, auth_token, simplify):\n \"\"\"\n Invokes zimbra request.\n @param ns: namespace of the request method\n @param request_name: name of the request method\n @param params: parameters to pass to method call\n @param auth_token: authentication token to use for session\n @param simplify: True to return python object, False to return xml struct\n @return: zimbra response\n \"\"\"\n if auth_token == None:\n raise ZimbraClientException('Invalid auth token')\n"
] |
class SoapTransport(ZimbraClientTransport):
"""
Soap transport.
"""
# --------------------------------------------------------------- properties
http_transport = SoapHttpTransport()
# -------------------------------------------------------------------- bound
def __init__(self):
ZimbraClientTransport.__init__(self)
self.http_transport.transport = self
self.log = logging.getLogger(__name__)
# ------------------------------------------------------------------ unbound
|
omaraboumrad/mastool
|
mastool/extension.py
|
Mastool.build_message
|
python
|
def build_message(self, checker):
solution = ' (%s)' % checker.solution if self.with_solutions else ''
return '{} {}{}'.format(checker.code,
checker.msg,
solution)
|
Builds the checker's error message to report
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/extension.py#L32-L37
| null |
class Mastool(object):
"""Flake8 Extension"""
name = 'mastool'
version = __version__
def __init__(self, tree, filename):
self.tree = tree
self.filename = filename
@classmethod
def add_options(cls, parser):
"""Provides the --with-solutions option"""
parser.add_option('--with-solutions',
default=False,
action='store',
help='Enables mastool possible solutions')
@classmethod
def parse_options(cls, options):
"""Assigns the with_solutions option"""
cls.with_solutions = options.with_solutions
def run(self):
"""Primary entry point to the plugin, runs once per file."""
paths = [x for x in practices.__dict__.values()
if hasattr(x, 'code')]
for node in ast.walk(self.tree):
try:
lineno, col_offset = node.lineno, node.col_offset
except AttributeError:
# Not all nodes have coordinates, e.g.: ast.Module
continue
for checker in paths:
if checker(node):
message = self.build_message(checker)
yield lineno, col_offset, message, type(self)
|
omaraboumrad/mastool
|
mastool/extension.py
|
Mastool.run
|
python
|
def run(self):
paths = [x for x in practices.__dict__.values()
if hasattr(x, 'code')]
for node in ast.walk(self.tree):
try:
lineno, col_offset = node.lineno, node.col_offset
except AttributeError:
# Not all nodes have coordinates, e.g.: ast.Module
continue
for checker in paths:
if checker(node):
message = self.build_message(checker)
yield lineno, col_offset, message, type(self)
|
Primary entry point to the plugin, runs once per file.
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/extension.py#L39-L54
|
[
"def build_message(self, checker):\n \"\"\"Builds the checker's error message to report\"\"\"\n solution = ' (%s)' % checker.solution if self.with_solutions else ''\n return '{} {}{}'.format(checker.code,\n checker.msg,\n solution)\n"
] |
class Mastool(object):
"""Flake8 Extension"""
name = 'mastool'
version = __version__
def __init__(self, tree, filename):
self.tree = tree
self.filename = filename
@classmethod
def add_options(cls, parser):
"""Provides the --with-solutions option"""
parser.add_option('--with-solutions',
default=False,
action='store',
help='Enables mastool possible solutions')
@classmethod
def parse_options(cls, options):
"""Assigns the with_solutions option"""
cls.with_solutions = options.with_solutions
def build_message(self, checker):
"""Builds the checker's error message to report"""
solution = ' (%s)' % checker.solution if self.with_solutions else ''
return '{} {}{}'.format(checker.code,
checker.msg,
solution)
|
omaraboumrad/mastool
|
mastool/helpers.py
|
is_boolean
|
python
|
def is_boolean(node):
return any([
isinstance(node, ast.Name)
and node.id in ('True', 'False'),
hasattr(ast, 'NameConstant') # Support for Python 3 NameConstant
and isinstance(node, getattr(ast, 'NameConstant')) # screw you pylint!
and str(node.value) in ('True', 'False')
])
|
Checks if node is True or False
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/helpers.py#L15-L23
| null |
"""
helpers to support practices
"""
import ast
def has_else(node):
"""Checks if node has else"""
return (
isinstance(node, ast.If)
and len(node.orelse) > 0
)
def call_name_is(siter, name):
"""Checks the function call name"""
return (
isinstance(siter, ast.Call)
and hasattr(siter.func, 'attr')
and siter.func.attr == name
)
def target_names(targets):
"""Retrieves the target names"""
names = []
for entry in targets:
if isinstance(entry, ast.Name):
names.append(entry.id)
elif isinstance(entry, ast.Tuple):
for element in entry.elts:
if isinstance(element, ast.Name):
names.append(element.id)
return names
def importfrom_names(names):
"""Retrieves the importfrom names"""
return [n.name for n in names]
def labeled(**kwargs):
"""decorator to give practices labels"""
def for_practice(practice):
"""assigns label to practice"""
practice.code = kwargs.pop('code')
practice.msg = kwargs.pop('msg')
practice.solution = kwargs.pop('solution')
return practice
return for_practice
|
omaraboumrad/mastool
|
mastool/helpers.py
|
call_name_is
|
python
|
def call_name_is(siter, name):
return (
isinstance(siter, ast.Call)
and hasattr(siter.func, 'attr')
and siter.func.attr == name
)
|
Checks the function call name
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/helpers.py#L26-L32
| null |
"""
helpers to support practices
"""
import ast
def has_else(node):
"""Checks if node has else"""
return (
isinstance(node, ast.If)
and len(node.orelse) > 0
)
def is_boolean(node):
"""Checks if node is True or False"""
return any([
isinstance(node, ast.Name)
and node.id in ('True', 'False'),
hasattr(ast, 'NameConstant') # Support for Python 3 NameConstant
and isinstance(node, getattr(ast, 'NameConstant')) # screw you pylint!
and str(node.value) in ('True', 'False')
])
def target_names(targets):
"""Retrieves the target names"""
names = []
for entry in targets:
if isinstance(entry, ast.Name):
names.append(entry.id)
elif isinstance(entry, ast.Tuple):
for element in entry.elts:
if isinstance(element, ast.Name):
names.append(element.id)
return names
def importfrom_names(names):
"""Retrieves the importfrom names"""
return [n.name for n in names]
def labeled(**kwargs):
"""decorator to give practices labels"""
def for_practice(practice):
"""assigns label to practice"""
practice.code = kwargs.pop('code')
practice.msg = kwargs.pop('msg')
practice.solution = kwargs.pop('solution')
return practice
return for_practice
|
omaraboumrad/mastool
|
mastool/helpers.py
|
target_names
|
python
|
def target_names(targets):
names = []
for entry in targets:
if isinstance(entry, ast.Name):
names.append(entry.id)
elif isinstance(entry, ast.Tuple):
for element in entry.elts:
if isinstance(element, ast.Name):
names.append(element.id)
return names
|
Retrieves the target names
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/helpers.py#L35-L46
| null |
"""
helpers to support practices
"""
import ast
def has_else(node):
"""Checks if node has else"""
return (
isinstance(node, ast.If)
and len(node.orelse) > 0
)
def is_boolean(node):
"""Checks if node is True or False"""
return any([
isinstance(node, ast.Name)
and node.id in ('True', 'False'),
hasattr(ast, 'NameConstant') # Support for Python 3 NameConstant
and isinstance(node, getattr(ast, 'NameConstant')) # screw you pylint!
and str(node.value) in ('True', 'False')
])
def call_name_is(siter, name):
"""Checks the function call name"""
return (
isinstance(siter, ast.Call)
and hasattr(siter.func, 'attr')
and siter.func.attr == name
)
def importfrom_names(names):
"""Retrieves the importfrom names"""
return [n.name for n in names]
def labeled(**kwargs):
"""decorator to give practices labels"""
def for_practice(practice):
"""assigns label to practice"""
practice.code = kwargs.pop('code')
practice.msg = kwargs.pop('msg')
practice.solution = kwargs.pop('solution')
return practice
return for_practice
|
omaraboumrad/mastool
|
mastool/helpers.py
|
labeled
|
python
|
def labeled(**kwargs):
def for_practice(practice):
"""assigns label to practice"""
practice.code = kwargs.pop('code')
practice.msg = kwargs.pop('msg')
practice.solution = kwargs.pop('solution')
return practice
return for_practice
|
decorator to give practices labels
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/helpers.py#L54-L62
| null |
"""
helpers to support practices
"""
import ast
def has_else(node):
"""Checks if node has else"""
return (
isinstance(node, ast.If)
and len(node.orelse) > 0
)
def is_boolean(node):
"""Checks if node is True or False"""
return any([
isinstance(node, ast.Name)
and node.id in ('True', 'False'),
hasattr(ast, 'NameConstant') # Support for Python 3 NameConstant
and isinstance(node, getattr(ast, 'NameConstant')) # screw you pylint!
and str(node.value) in ('True', 'False')
])
def call_name_is(siter, name):
"""Checks the function call name"""
return (
isinstance(siter, ast.Call)
and hasattr(siter.func, 'attr')
and siter.func.attr == name
)
def target_names(targets):
"""Retrieves the target names"""
names = []
for entry in targets:
if isinstance(entry, ast.Name):
names.append(entry.id)
elif isinstance(entry, ast.Tuple):
for element in entry.elts:
if isinstance(element, ast.Name):
names.append(element.id)
return names
def importfrom_names(names):
"""Retrieves the importfrom names"""
return [n.name for n in names]
|
omaraboumrad/mastool
|
mastool/practices.py
|
find_for_x_in_y_keys
|
python
|
def find_for_x_in_y_keys(node):
return (
isinstance(node, ast.For)
and h.call_name_is(node.iter, 'keys')
)
|
Finds looping against dictionary keys
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/practices.py#L13-L18
|
[
"def call_name_is(siter, name):\n \"\"\"Checks the function call name\"\"\"\n return (\n isinstance(siter, ast.Call)\n and hasattr(siter.func, 'attr')\n and siter.func.attr == name\n )\n"
] |
"""
Practices and Checks listing
"""
import ast
import sys
from mastool import helpers as h
@h.labeled(code='M001',
msg='looping against dictionary keys',
solution="use 'for key in dictionary' instead.")
@h.labeled(code='M002',
msg='simplifiable if condition',
solution="instead of 'if cond: return True else return False' "
"use: 'return cond'")
def find_if_x_retbool_else_retbool(node):
"""Finds simplifiable if condition"""
return (
isinstance(node, ast.If)
and isinstance(node.body[0], ast.Return)
and h.is_boolean(node.body[0].value)
and h.has_else(node)
and isinstance(node.orelse[0], ast.Return)
and h.is_boolean(node.orelse[0].value)
)
@h.labeled(code='M003',
msg='joining path with plus',
solution="instead of: 'p1 + '/' + p2', use 'os.path.join(p1, p2)'")
def find_path_join_using_plus(node):
"""Finds joining path with plus"""
return (
isinstance(node, ast.BinOp)
and isinstance(node.op, ast.Add)
and isinstance(node.left, ast.BinOp)
and isinstance(node.left.op, ast.Add)
and isinstance(node.left.right, ast.Str)
and node.left.right.s in ['/', "\\"]
)
@h.labeled(code='M004',
msg='assigning to built-in',
solution="change symbol name to something else")
def find_assign_to_builtin(node):
"""Finds assigning to built-ins"""
# The list of forbidden builtins is constant and not determined at
# runtime anyomre. The reason behind this change is that certain
# modules (like `gettext` for instance) would mess with the
# builtins module making this practice yield false positives.
if sys.version_info.major == 3:
builtins = {"abs", "all", "any", "ascii", "bin", "bool",
"bytearray", "bytes", "callable", "chr",
"classmethod", "compile", "complex", "delattr",
"dict", "dir", "divmod", "enumerate", "eval",
"exec", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "__import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "map", "max", "memoryview", "min",
"next", "object", "oct", "open", "ord", "pow",
"print", "property", "range", "repr", "reversed",
"round", "set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "vars", "zip"}
else:
builtins = {"abs", "all", "any", "basestring", "bin", "bool",
"bytearray", "callable", "chr", "classmethod",
"cmp", "compile", "complex", "delattr", "dict",
"dir", "divmod", "enumerate", "eval", "execfile",
"file", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "long", "map", "max", "memoryview",
"min", "next", "object", "oct", "open", "ord",
"pow", "print", "property", "range", "raw_input",
"reduce", "reload", "repr", "reversed", "round",
"set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "unichr", "unicode", "vars", "xrange",
"zip"}
return (
isinstance(node, ast.Assign)
and len(builtins & set(h.target_names(node.targets))) > 0
)
@h.labeled(code='M005',
msg='catching a generic exception',
solution="instead of 'except:' use 'except [Specific]:'")
def find_generic_exception(node):
"""Finds generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
)
@h.labeled(code='M006',
msg='catching a generic exception and passing it silently',
solution="instead of 'except: pass' use 'except [Specific]:' "
"and handle it")
def find_silent_exception(node):
"""Finds silent generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
and len(node.body) == 1
and isinstance(node.body[0], ast.Pass)
)
@h.labeled(code='M007',
msg='use of import star',
solution="make explicit imports")
def find_import_star(node):
"""Finds import stars"""
return (
isinstance(node, ast.ImportFrom)
and '*' in h.importfrom_names(node.names)
)
@h.labeled(code='M008',
msg='comparing to True or False',
solution="instead of 'a == True' use 'a' or 'bool(a)'")
def find_equals_true_or_false(node):
"""Finds equals true or false"""
return (
isinstance(node, ast.Compare)
and len(node.ops) == 1
and isinstance(node.ops[0], ast.Eq)
and any(h.is_boolean(n) for n in node.comparators)
)
@h.labeled(code='M009',
msg='poor choice of default argument',
solution="use `None` as the default arg, and "
"initialize the variable inside the function block")
def find_poor_default_arg(node):
"""Finds poor default args"""
poor_defaults = [
ast.Call,
ast.Dict,
ast.DictComp,
ast.GeneratorExp,
ast.List,
ast.ListComp,
ast.Set,
ast.SetComp,
]
# pylint: disable=unidiomatic-typecheck
return (
isinstance(node, ast.FunctionDef)
and any((n for n in node.args.defaults if type(n) in poor_defaults))
)
# pylint: enable=unidiomatic-typecheck
@h.labeled(code='M010',
msg='use of "if" expression as statement',
solution='use a normal "if" condition instead')
def find_if_expression_as_statement(node):
"""Finds an "if" expression as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.IfExp)
)
@h.labeled(code='M011',
msg='use of a comprehension as statement',
solution='use a loop instead')
def find_comprehension_as_statement(node):
"""Finds a comprehension as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, (ast.ListComp,
ast.DictComp,
ast.SetComp))
)
@h.labeled(code='M012',
msg='use of a generator as statement',
solution='this done nothing!')
def find_generator_as_statement(node):
"""Finds a generator as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.GeneratorExp)
)
|
omaraboumrad/mastool
|
mastool/practices.py
|
find_if_x_retbool_else_retbool
|
python
|
def find_if_x_retbool_else_retbool(node):
return (
isinstance(node, ast.If)
and isinstance(node.body[0], ast.Return)
and h.is_boolean(node.body[0].value)
and h.has_else(node)
and isinstance(node.orelse[0], ast.Return)
and h.is_boolean(node.orelse[0].value)
)
|
Finds simplifiable if condition
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/practices.py#L25-L34
| null |
"""
Practices and Checks listing
"""
import ast
import sys
from mastool import helpers as h
@h.labeled(code='M001',
msg='looping against dictionary keys',
solution="use 'for key in dictionary' instead.")
def find_for_x_in_y_keys(node):
"""Finds looping against dictionary keys"""
return (
isinstance(node, ast.For)
and h.call_name_is(node.iter, 'keys')
)
@h.labeled(code='M002',
msg='simplifiable if condition',
solution="instead of 'if cond: return True else return False' "
"use: 'return cond'")
@h.labeled(code='M003',
msg='joining path with plus',
solution="instead of: 'p1 + '/' + p2', use 'os.path.join(p1, p2)'")
def find_path_join_using_plus(node):
"""Finds joining path with plus"""
return (
isinstance(node, ast.BinOp)
and isinstance(node.op, ast.Add)
and isinstance(node.left, ast.BinOp)
and isinstance(node.left.op, ast.Add)
and isinstance(node.left.right, ast.Str)
and node.left.right.s in ['/', "\\"]
)
@h.labeled(code='M004',
msg='assigning to built-in',
solution="change symbol name to something else")
def find_assign_to_builtin(node):
"""Finds assigning to built-ins"""
# The list of forbidden builtins is constant and not determined at
# runtime anyomre. The reason behind this change is that certain
# modules (like `gettext` for instance) would mess with the
# builtins module making this practice yield false positives.
if sys.version_info.major == 3:
builtins = {"abs", "all", "any", "ascii", "bin", "bool",
"bytearray", "bytes", "callable", "chr",
"classmethod", "compile", "complex", "delattr",
"dict", "dir", "divmod", "enumerate", "eval",
"exec", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "__import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "map", "max", "memoryview", "min",
"next", "object", "oct", "open", "ord", "pow",
"print", "property", "range", "repr", "reversed",
"round", "set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "vars", "zip"}
else:
builtins = {"abs", "all", "any", "basestring", "bin", "bool",
"bytearray", "callable", "chr", "classmethod",
"cmp", "compile", "complex", "delattr", "dict",
"dir", "divmod", "enumerate", "eval", "execfile",
"file", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "long", "map", "max", "memoryview",
"min", "next", "object", "oct", "open", "ord",
"pow", "print", "property", "range", "raw_input",
"reduce", "reload", "repr", "reversed", "round",
"set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "unichr", "unicode", "vars", "xrange",
"zip"}
return (
isinstance(node, ast.Assign)
and len(builtins & set(h.target_names(node.targets))) > 0
)
@h.labeled(code='M005',
msg='catching a generic exception',
solution="instead of 'except:' use 'except [Specific]:'")
def find_generic_exception(node):
"""Finds generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
)
@h.labeled(code='M006',
msg='catching a generic exception and passing it silently',
solution="instead of 'except: pass' use 'except [Specific]:' "
"and handle it")
def find_silent_exception(node):
"""Finds silent generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
and len(node.body) == 1
and isinstance(node.body[0], ast.Pass)
)
@h.labeled(code='M007',
msg='use of import star',
solution="make explicit imports")
def find_import_star(node):
"""Finds import stars"""
return (
isinstance(node, ast.ImportFrom)
and '*' in h.importfrom_names(node.names)
)
@h.labeled(code='M008',
msg='comparing to True or False',
solution="instead of 'a == True' use 'a' or 'bool(a)'")
def find_equals_true_or_false(node):
"""Finds equals true or false"""
return (
isinstance(node, ast.Compare)
and len(node.ops) == 1
and isinstance(node.ops[0], ast.Eq)
and any(h.is_boolean(n) for n in node.comparators)
)
@h.labeled(code='M009',
msg='poor choice of default argument',
solution="use `None` as the default arg, and "
"initialize the variable inside the function block")
def find_poor_default_arg(node):
"""Finds poor default args"""
poor_defaults = [
ast.Call,
ast.Dict,
ast.DictComp,
ast.GeneratorExp,
ast.List,
ast.ListComp,
ast.Set,
ast.SetComp,
]
# pylint: disable=unidiomatic-typecheck
return (
isinstance(node, ast.FunctionDef)
and any((n for n in node.args.defaults if type(n) in poor_defaults))
)
# pylint: enable=unidiomatic-typecheck
@h.labeled(code='M010',
msg='use of "if" expression as statement',
solution='use a normal "if" condition instead')
def find_if_expression_as_statement(node):
"""Finds an "if" expression as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.IfExp)
)
@h.labeled(code='M011',
msg='use of a comprehension as statement',
solution='use a loop instead')
def find_comprehension_as_statement(node):
"""Finds a comprehension as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, (ast.ListComp,
ast.DictComp,
ast.SetComp))
)
@h.labeled(code='M012',
msg='use of a generator as statement',
solution='this done nothing!')
def find_generator_as_statement(node):
"""Finds a generator as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.GeneratorExp)
)
|
omaraboumrad/mastool
|
mastool/practices.py
|
find_path_join_using_plus
|
python
|
def find_path_join_using_plus(node):
return (
isinstance(node, ast.BinOp)
and isinstance(node.op, ast.Add)
and isinstance(node.left, ast.BinOp)
and isinstance(node.left.op, ast.Add)
and isinstance(node.left.right, ast.Str)
and node.left.right.s in ['/', "\\"]
)
|
Finds joining path with plus
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/practices.py#L40-L49
| null |
"""
Practices and Checks listing
"""
import ast
import sys
from mastool import helpers as h
@h.labeled(code='M001',
msg='looping against dictionary keys',
solution="use 'for key in dictionary' instead.")
def find_for_x_in_y_keys(node):
"""Finds looping against dictionary keys"""
return (
isinstance(node, ast.For)
and h.call_name_is(node.iter, 'keys')
)
@h.labeled(code='M002',
msg='simplifiable if condition',
solution="instead of 'if cond: return True else return False' "
"use: 'return cond'")
def find_if_x_retbool_else_retbool(node):
"""Finds simplifiable if condition"""
return (
isinstance(node, ast.If)
and isinstance(node.body[0], ast.Return)
and h.is_boolean(node.body[0].value)
and h.has_else(node)
and isinstance(node.orelse[0], ast.Return)
and h.is_boolean(node.orelse[0].value)
)
@h.labeled(code='M003',
msg='joining path with plus',
solution="instead of: 'p1 + '/' + p2', use 'os.path.join(p1, p2)'")
@h.labeled(code='M004',
msg='assigning to built-in',
solution="change symbol name to something else")
def find_assign_to_builtin(node):
"""Finds assigning to built-ins"""
# The list of forbidden builtins is constant and not determined at
# runtime anyomre. The reason behind this change is that certain
# modules (like `gettext` for instance) would mess with the
# builtins module making this practice yield false positives.
if sys.version_info.major == 3:
builtins = {"abs", "all", "any", "ascii", "bin", "bool",
"bytearray", "bytes", "callable", "chr",
"classmethod", "compile", "complex", "delattr",
"dict", "dir", "divmod", "enumerate", "eval",
"exec", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "__import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "map", "max", "memoryview", "min",
"next", "object", "oct", "open", "ord", "pow",
"print", "property", "range", "repr", "reversed",
"round", "set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "vars", "zip"}
else:
builtins = {"abs", "all", "any", "basestring", "bin", "bool",
"bytearray", "callable", "chr", "classmethod",
"cmp", "compile", "complex", "delattr", "dict",
"dir", "divmod", "enumerate", "eval", "execfile",
"file", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "long", "map", "max", "memoryview",
"min", "next", "object", "oct", "open", "ord",
"pow", "print", "property", "range", "raw_input",
"reduce", "reload", "repr", "reversed", "round",
"set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "unichr", "unicode", "vars", "xrange",
"zip"}
return (
isinstance(node, ast.Assign)
and len(builtins & set(h.target_names(node.targets))) > 0
)
@h.labeled(code='M005',
msg='catching a generic exception',
solution="instead of 'except:' use 'except [Specific]:'")
def find_generic_exception(node):
    """Return True for a bare ``except:`` handler (no exception type given)."""
    is_handler = isinstance(node, ast.ExceptHandler)
    # ``node.type`` is None exactly when the handler catches everything.
    return is_handler and node.type is None
@h.labeled(code='M006',
msg='catching a generic exception and passing it silently',
solution="instead of 'except: pass' use 'except [Specific]:' "
"and handle it")
def find_silent_exception(node):
    """Return True for a bare ``except:`` whose entire body is ``pass``."""
    if not isinstance(node, ast.ExceptHandler) or node.type is not None:
        return False
    body = node.body
    # A single ``pass`` statement means the exception is swallowed silently.
    return len(body) == 1 and isinstance(body[0], ast.Pass)
@h.labeled(code='M007',
msg='use of import star',
solution="make explicit imports")
def find_import_star(node):
"""Finds import stars"""
return (
isinstance(node, ast.ImportFrom)
and '*' in h.importfrom_names(node.names)
)
@h.labeled(code='M008',
msg='comparing to True or False',
solution="instead of 'a == True' use 'a' or 'bool(a)'")
def find_equals_true_or_false(node):
"""Finds equals true or false"""
return (
isinstance(node, ast.Compare)
and len(node.ops) == 1
and isinstance(node.ops[0], ast.Eq)
and any(h.is_boolean(n) for n in node.comparators)
)
@h.labeled(code='M009',
msg='poor choice of default argument',
solution="use `None` as the default arg, and "
"initialize the variable inside the function block")
def find_poor_default_arg(node):
    """Return True when a function definition uses a mutable or computed default."""
    poor_defaults = (
        ast.Call,
        ast.Dict,
        ast.DictComp,
        ast.GeneratorExp,
        ast.List,
        ast.ListComp,
        ast.Set,
        ast.SetComp,
    )
    if not isinstance(node, ast.FunctionDef):
        return False
    # Exact type comparison is deliberate: subclasses must not match.
    return any(type(default) in poor_defaults for default in node.args.defaults)  # pylint: disable=unidiomatic-typecheck
# pylint: enable=unidiomatic-typecheck
@h.labeled(code='M010',
msg='use of "if" expression as statement',
solution='use a normal "if" condition instead')
def find_if_expression_as_statement(node):
"""Finds an "if" expression as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.IfExp)
)
@h.labeled(code='M011',
msg='use of a comprehension as statement',
solution='use a loop instead')
def find_comprehension_as_statement(node):
"""Finds a comprehension as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, (ast.ListComp,
ast.DictComp,
ast.SetComp))
)
@h.labeled(code='M012',
msg='use of a generator as statement',
solution='this done nothing!')
def find_generator_as_statement(node):
"""Finds a generator as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.GeneratorExp)
)
|
omaraboumrad/mastool
|
mastool/practices.py
|
find_assign_to_builtin
|
python
|
def find_assign_to_builtin(node):
# The list of forbidden builtins is constant and not determined at
# runtime anyomre. The reason behind this change is that certain
# modules (like `gettext` for instance) would mess with the
# builtins module making this practice yield false positives.
if sys.version_info.major == 3:
builtins = {"abs", "all", "any", "ascii", "bin", "bool",
"bytearray", "bytes", "callable", "chr",
"classmethod", "compile", "complex", "delattr",
"dict", "dir", "divmod", "enumerate", "eval",
"exec", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "__import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "map", "max", "memoryview", "min",
"next", "object", "oct", "open", "ord", "pow",
"print", "property", "range", "repr", "reversed",
"round", "set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "vars", "zip"}
else:
builtins = {"abs", "all", "any", "basestring", "bin", "bool",
"bytearray", "callable", "chr", "classmethod",
"cmp", "compile", "complex", "delattr", "dict",
"dir", "divmod", "enumerate", "eval", "execfile",
"file", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "long", "map", "max", "memoryview",
"min", "next", "object", "oct", "open", "ord",
"pow", "print", "property", "range", "raw_input",
"reduce", "reload", "repr", "reversed", "round",
"set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "unichr", "unicode", "vars", "xrange",
"zip"}
return (
isinstance(node, ast.Assign)
and len(builtins & set(h.target_names(node.targets))) > 0
)
|
Finds assigning to built-ins
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/practices.py#L55-L98
|
[
"def target_names(targets):\n \"\"\"Retrieves the target names\"\"\"\n names = []\n for entry in targets:\n if isinstance(entry, ast.Name):\n names.append(entry.id)\n elif isinstance(entry, ast.Tuple):\n for element in entry.elts:\n if isinstance(element, ast.Name):\n names.append(element.id)\n\n return names\n"
] |
"""
Practices and Checks listing
"""
import ast
import sys
from mastool import helpers as h
@h.labeled(code='M001',
msg='looping against dictionary keys',
solution="use 'for key in dictionary' instead.")
def find_for_x_in_y_keys(node):
"""Finds looping against dictionary keys"""
return (
isinstance(node, ast.For)
and h.call_name_is(node.iter, 'keys')
)
@h.labeled(code='M002',
msg='simplifiable if condition',
solution="instead of 'if cond: return True else return False' "
"use: 'return cond'")
def find_if_x_retbool_else_retbool(node):
    """Return True for ``if cond: return True/False else: return False/True``."""
    if not isinstance(node, ast.If):
        return False
    first = node.body[0]
    if not (isinstance(first, ast.Return) and h.is_boolean(first.value)):
        return False
    # Only inspect the orelse branch once we know an "else" exists.
    if not h.has_else(node):
        return False
    alternative = node.orelse[0]
    return isinstance(alternative, ast.Return) and h.is_boolean(alternative.value)
@h.labeled(code='M003',
msg='joining path with plus',
solution="instead of: 'p1 + '/' + p2', use 'os.path.join(p1, p2)'")
def find_path_join_using_plus(node):
    """Finds joining path with plus"""
    # Matches the left-nested shape produced by ``p1 + '/' + p2``:
    # BinOp(BinOp(p1, Add, sep), Add, p2), where the separator string
    # literal is the right operand of the inner addition.
    # NOTE(review): ast.Str is deprecated since Python 3.8 (ast.parse emits
    # ast.Constant instead) and removed in 3.12 -- confirm the supported
    # interpreter range before relying on this check.
    return (
        isinstance(node, ast.BinOp)
        and isinstance(node.op, ast.Add)
        and isinstance(node.left, ast.BinOp)
        and isinstance(node.left.op, ast.Add)
        and isinstance(node.left.right, ast.Str)
        # Separator literal: forward slash or a single backslash.
        and node.left.right.s in ['/', "\\"]
    )
@h.labeled(code='M004',
msg='assigning to built-in',
solution="change symbol name to something else")
@h.labeled(code='M005',
msg='catching a generic exception',
solution="instead of 'except:' use 'except [Specific]:'")
def find_generic_exception(node):
"""Finds generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
)
@h.labeled(code='M006',
msg='catching a generic exception and passing it silently',
solution="instead of 'except: pass' use 'except [Specific]:' "
"and handle it")
def find_silent_exception(node):
"""Finds silent generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
and len(node.body) == 1
and isinstance(node.body[0], ast.Pass)
)
@h.labeled(code='M007',
msg='use of import star',
solution="make explicit imports")
def find_import_star(node):
"""Finds import stars"""
return (
isinstance(node, ast.ImportFrom)
and '*' in h.importfrom_names(node.names)
)
@h.labeled(code='M008',
msg='comparing to True or False',
solution="instead of 'a == True' use 'a' or 'bool(a)'")
def find_equals_true_or_false(node):
"""Finds equals true or false"""
return (
isinstance(node, ast.Compare)
and len(node.ops) == 1
and isinstance(node.ops[0], ast.Eq)
and any(h.is_boolean(n) for n in node.comparators)
)
@h.labeled(code='M009',
msg='poor choice of default argument',
solution="use `None` as the default arg, and "
"initialize the variable inside the function block")
def find_poor_default_arg(node):
"""Finds poor default args"""
poor_defaults = [
ast.Call,
ast.Dict,
ast.DictComp,
ast.GeneratorExp,
ast.List,
ast.ListComp,
ast.Set,
ast.SetComp,
]
# pylint: disable=unidiomatic-typecheck
return (
isinstance(node, ast.FunctionDef)
and any((n for n in node.args.defaults if type(n) in poor_defaults))
)
# pylint: enable=unidiomatic-typecheck
@h.labeled(code='M010',
msg='use of "if" expression as statement',
solution='use a normal "if" condition instead')
def find_if_expression_as_statement(node):
"""Finds an "if" expression as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.IfExp)
)
@h.labeled(code='M011',
msg='use of a comprehension as statement',
solution='use a loop instead')
def find_comprehension_as_statement(node):
"""Finds a comprehension as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, (ast.ListComp,
ast.DictComp,
ast.SetComp))
)
@h.labeled(code='M012',
msg='use of a generator as statement',
solution='this done nothing!')
def find_generator_as_statement(node):
"""Finds a generator as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.GeneratorExp)
)
|
omaraboumrad/mastool
|
mastool/practices.py
|
find_silent_exception
|
python
|
def find_silent_exception(node):
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
and len(node.body) == 1
and isinstance(node.body[0], ast.Pass)
)
|
Finds silent generic exceptions
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/practices.py#L116-L123
| null |
"""
Practices and Checks listing
"""
import ast
import sys
from mastool import helpers as h
@h.labeled(code='M001',
msg='looping against dictionary keys',
solution="use 'for key in dictionary' instead.")
def find_for_x_in_y_keys(node):
"""Finds looping against dictionary keys"""
return (
isinstance(node, ast.For)
and h.call_name_is(node.iter, 'keys')
)
@h.labeled(code='M002',
msg='simplifiable if condition',
solution="instead of 'if cond: return True else return False' "
"use: 'return cond'")
def find_if_x_retbool_else_retbool(node):
"""Finds simplifiable if condition"""
return (
isinstance(node, ast.If)
and isinstance(node.body[0], ast.Return)
and h.is_boolean(node.body[0].value)
and h.has_else(node)
and isinstance(node.orelse[0], ast.Return)
and h.is_boolean(node.orelse[0].value)
)
@h.labeled(code='M003',
msg='joining path with plus',
solution="instead of: 'p1 + '/' + p2', use 'os.path.join(p1, p2)'")
def find_path_join_using_plus(node):
"""Finds joining path with plus"""
return (
isinstance(node, ast.BinOp)
and isinstance(node.op, ast.Add)
and isinstance(node.left, ast.BinOp)
and isinstance(node.left.op, ast.Add)
and isinstance(node.left.right, ast.Str)
and node.left.right.s in ['/', "\\"]
)
@h.labeled(code='M004',
msg='assigning to built-in',
solution="change symbol name to something else")
def find_assign_to_builtin(node):
    """Finds assignments that shadow a Python built-in name."""
    # The list of forbidden builtins is constant and not determined at
    # runtime anymore. The reason behind this change is that certain
    # modules (like `gettext` for instance) would mess with the
    # builtins module making this practice yield false positives.
    # The set literal is chosen per interpreter major version.
    if sys.version_info.major == 3:
        builtins = {"abs", "all", "any", "ascii", "bin", "bool",
                    "bytearray", "bytes", "callable", "chr",
                    "classmethod", "compile", "complex", "delattr",
                    "dict", "dir", "divmod", "enumerate", "eval",
                    "exec", "filter", "float", "format", "frozenset",
                    "getattr", "globals", "hasattr", "hash", "help",
                    "hex", "id", "__import__", "input", "int",
                    "isinstance", "issubclass", "iter", "len", "list",
                    "locals", "map", "max", "memoryview", "min",
                    "next", "object", "oct", "open", "ord", "pow",
                    "print", "property", "range", "repr", "reversed",
                    "round", "set", "setattr", "slice", "sorted",
                    "staticmethod", "str", "sum", "super", "tuple",
                    "type", "vars", "zip"}
    else:
        builtins = {"abs", "all", "any", "basestring", "bin", "bool",
                    "bytearray", "callable", "chr", "classmethod",
                    "cmp", "compile", "complex", "delattr", "dict",
                    "dir", "divmod", "enumerate", "eval", "execfile",
                    "file", "filter", "float", "format", "frozenset",
                    "getattr", "globals", "hasattr", "hash", "help",
                    "hex", "id", "import__", "input", "int",
                    "isinstance", "issubclass", "iter", "len", "list",
                    "locals", "long", "map", "max", "memoryview",
                    "min", "next", "object", "oct", "open", "ord",
                    "pow", "print", "property", "range", "raw_input",
                    "reduce", "reload", "repr", "reversed", "round",
                    "set", "setattr", "slice", "sorted",
                    "staticmethod", "str", "sum", "super", "tuple",
                    "type", "unichr", "unicode", "vars", "xrange",
                    "zip"}
    # Flag the assignment if any of its target names collides with a builtin.
    return (
        isinstance(node, ast.Assign)
        and len(builtins & set(h.target_names(node.targets))) > 0
    )
@h.labeled(code='M005',
msg='catching a generic exception',
solution="instead of 'except:' use 'except [Specific]:'")
def find_generic_exception(node):
"""Finds generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
)
@h.labeled(code='M006',
msg='catching a generic exception and passing it silently',
solution="instead of 'except: pass' use 'except [Specific]:' "
"and handle it")
@h.labeled(code='M007',
msg='use of import star',
solution="make explicit imports")
def find_import_star(node):
    """Return True for ``from module import *`` statements."""
    if not isinstance(node, ast.ImportFrom):
        return False
    # A wildcard import shows up as the literal name '*'.
    return '*' in h.importfrom_names(node.names)
@h.labeled(code='M008',
msg='comparing to True or False',
solution="instead of 'a == True' use 'a' or 'bool(a)'")
def find_equals_true_or_false(node):
    """Finds equals true or false"""
    # Only a single, plain ``==`` comparison is flagged (``a == True``);
    # chained comparisons and other operators (``!=``, ``is``) are ignored.
    return (
        isinstance(node, ast.Compare)
        and len(node.ops) == 1
        and isinstance(node.ops[0], ast.Eq)
        and any(h.is_boolean(n) for n in node.comparators)
    )
@h.labeled(code='M009',
msg='poor choice of default argument',
solution="use `None` as the default arg, and "
"initialize the variable inside the function block")
def find_poor_default_arg(node):
"""Finds poor default args"""
poor_defaults = [
ast.Call,
ast.Dict,
ast.DictComp,
ast.GeneratorExp,
ast.List,
ast.ListComp,
ast.Set,
ast.SetComp,
]
# pylint: disable=unidiomatic-typecheck
return (
isinstance(node, ast.FunctionDef)
and any((n for n in node.args.defaults if type(n) in poor_defaults))
)
# pylint: enable=unidiomatic-typecheck
@h.labeled(code='M010',
msg='use of "if" expression as statement',
solution='use a normal "if" condition instead')
def find_if_expression_as_statement(node):
"""Finds an "if" expression as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.IfExp)
)
@h.labeled(code='M011',
msg='use of a comprehension as statement',
solution='use a loop instead')
def find_comprehension_as_statement(node):
"""Finds a comprehension as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, (ast.ListComp,
ast.DictComp,
ast.SetComp))
)
@h.labeled(code='M012',
msg='use of a generator as statement',
solution='this done nothing!')
def find_generator_as_statement(node):
"""Finds a generator as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.GeneratorExp)
)
|
omaraboumrad/mastool
|
mastool/practices.py
|
find_import_star
|
python
|
def find_import_star(node):
return (
isinstance(node, ast.ImportFrom)
and '*' in h.importfrom_names(node.names)
)
|
Finds import stars
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/practices.py#L129-L134
|
[
"def importfrom_names(names):\n \"\"\"Retrieves the importfrom names\"\"\"\n return [n.name for n in names]\n"
] |
"""
Practices and Checks listing
"""
import ast
import sys
from mastool import helpers as h
@h.labeled(code='M001',
msg='looping against dictionary keys',
solution="use 'for key in dictionary' instead.")
def find_for_x_in_y_keys(node):
"""Finds looping against dictionary keys"""
return (
isinstance(node, ast.For)
and h.call_name_is(node.iter, 'keys')
)
@h.labeled(code='M002',
msg='simplifiable if condition',
solution="instead of 'if cond: return True else return False' "
"use: 'return cond'")
def find_if_x_retbool_else_retbool(node):
"""Finds simplifiable if condition"""
return (
isinstance(node, ast.If)
and isinstance(node.body[0], ast.Return)
and h.is_boolean(node.body[0].value)
and h.has_else(node)
and isinstance(node.orelse[0], ast.Return)
and h.is_boolean(node.orelse[0].value)
)
@h.labeled(code='M003',
msg='joining path with plus',
solution="instead of: 'p1 + '/' + p2', use 'os.path.join(p1, p2)'")
def find_path_join_using_plus(node):
"""Finds joining path with plus"""
return (
isinstance(node, ast.BinOp)
and isinstance(node.op, ast.Add)
and isinstance(node.left, ast.BinOp)
and isinstance(node.left.op, ast.Add)
and isinstance(node.left.right, ast.Str)
and node.left.right.s in ['/', "\\"]
)
@h.labeled(code='M004',
msg='assigning to built-in',
solution="change symbol name to something else")
def find_assign_to_builtin(node):
"""Finds assigning to built-ins"""
# The list of forbidden builtins is constant and not determined at
# runtime anyomre. The reason behind this change is that certain
# modules (like `gettext` for instance) would mess with the
# builtins module making this practice yield false positives.
if sys.version_info.major == 3:
builtins = {"abs", "all", "any", "ascii", "bin", "bool",
"bytearray", "bytes", "callable", "chr",
"classmethod", "compile", "complex", "delattr",
"dict", "dir", "divmod", "enumerate", "eval",
"exec", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "__import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "map", "max", "memoryview", "min",
"next", "object", "oct", "open", "ord", "pow",
"print", "property", "range", "repr", "reversed",
"round", "set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "vars", "zip"}
else:
builtins = {"abs", "all", "any", "basestring", "bin", "bool",
"bytearray", "callable", "chr", "classmethod",
"cmp", "compile", "complex", "delattr", "dict",
"dir", "divmod", "enumerate", "eval", "execfile",
"file", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "long", "map", "max", "memoryview",
"min", "next", "object", "oct", "open", "ord",
"pow", "print", "property", "range", "raw_input",
"reduce", "reload", "repr", "reversed", "round",
"set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "unichr", "unicode", "vars", "xrange",
"zip"}
return (
isinstance(node, ast.Assign)
and len(builtins & set(h.target_names(node.targets))) > 0
)
@h.labeled(code='M005',
msg='catching a generic exception',
solution="instead of 'except:' use 'except [Specific]:'")
def find_generic_exception(node):
"""Finds generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
)
@h.labeled(code='M006',
msg='catching a generic exception and passing it silently',
solution="instead of 'except: pass' use 'except [Specific]:' "
"and handle it")
def find_silent_exception(node):
"""Finds silent generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
and len(node.body) == 1
and isinstance(node.body[0], ast.Pass)
)
@h.labeled(code='M007',
msg='use of import star',
solution="make explicit imports")
@h.labeled(code='M008',
msg='comparing to True or False',
solution="instead of 'a == True' use 'a' or 'bool(a)'")
def find_equals_true_or_false(node):
"""Finds equals true or false"""
return (
isinstance(node, ast.Compare)
and len(node.ops) == 1
and isinstance(node.ops[0], ast.Eq)
and any(h.is_boolean(n) for n in node.comparators)
)
@h.labeled(code='M009',
msg='poor choice of default argument',
solution="use `None` as the default arg, and "
"initialize the variable inside the function block")
def find_poor_default_arg(node):
"""Finds poor default args"""
poor_defaults = [
ast.Call,
ast.Dict,
ast.DictComp,
ast.GeneratorExp,
ast.List,
ast.ListComp,
ast.Set,
ast.SetComp,
]
# pylint: disable=unidiomatic-typecheck
return (
isinstance(node, ast.FunctionDef)
and any((n for n in node.args.defaults if type(n) in poor_defaults))
)
# pylint: enable=unidiomatic-typecheck
@h.labeled(code='M010',
msg='use of "if" expression as statement',
solution='use a normal "if" condition instead')
def find_if_expression_as_statement(node):
"""Finds an "if" expression as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.IfExp)
)
@h.labeled(code='M011',
msg='use of a comprehension as statement',
solution='use a loop instead')
def find_comprehension_as_statement(node):
"""Finds a comprehension as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, (ast.ListComp,
ast.DictComp,
ast.SetComp))
)
@h.labeled(code='M012',
msg='use of a generator as statement',
solution='this done nothing!')
def find_generator_as_statement(node):
"""Finds a generator as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.GeneratorExp)
)
|
omaraboumrad/mastool
|
mastool/practices.py
|
find_equals_true_or_false
|
python
|
def find_equals_true_or_false(node):
return (
isinstance(node, ast.Compare)
and len(node.ops) == 1
and isinstance(node.ops[0], ast.Eq)
and any(h.is_boolean(n) for n in node.comparators)
)
|
Finds equals true or false
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/practices.py#L140-L147
| null |
"""
Practices and Checks listing
"""
import ast
import sys
from mastool import helpers as h
@h.labeled(code='M001',
msg='looping against dictionary keys',
solution="use 'for key in dictionary' instead.")
def find_for_x_in_y_keys(node):
"""Finds looping against dictionary keys"""
return (
isinstance(node, ast.For)
and h.call_name_is(node.iter, 'keys')
)
@h.labeled(code='M002',
msg='simplifiable if condition',
solution="instead of 'if cond: return True else return False' "
"use: 'return cond'")
def find_if_x_retbool_else_retbool(node):
"""Finds simplifiable if condition"""
return (
isinstance(node, ast.If)
and isinstance(node.body[0], ast.Return)
and h.is_boolean(node.body[0].value)
and h.has_else(node)
and isinstance(node.orelse[0], ast.Return)
and h.is_boolean(node.orelse[0].value)
)
@h.labeled(code='M003',
msg='joining path with plus',
solution="instead of: 'p1 + '/' + p2', use 'os.path.join(p1, p2)'")
def find_path_join_using_plus(node):
"""Finds joining path with plus"""
return (
isinstance(node, ast.BinOp)
and isinstance(node.op, ast.Add)
and isinstance(node.left, ast.BinOp)
and isinstance(node.left.op, ast.Add)
and isinstance(node.left.right, ast.Str)
and node.left.right.s in ['/', "\\"]
)
@h.labeled(code='M004',
msg='assigning to built-in',
solution="change symbol name to something else")
def find_assign_to_builtin(node):
"""Finds assigning to built-ins"""
# The list of forbidden builtins is constant and not determined at
# runtime anyomre. The reason behind this change is that certain
# modules (like `gettext` for instance) would mess with the
# builtins module making this practice yield false positives.
if sys.version_info.major == 3:
builtins = {"abs", "all", "any", "ascii", "bin", "bool",
"bytearray", "bytes", "callable", "chr",
"classmethod", "compile", "complex", "delattr",
"dict", "dir", "divmod", "enumerate", "eval",
"exec", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "__import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "map", "max", "memoryview", "min",
"next", "object", "oct", "open", "ord", "pow",
"print", "property", "range", "repr", "reversed",
"round", "set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "vars", "zip"}
else:
builtins = {"abs", "all", "any", "basestring", "bin", "bool",
"bytearray", "callable", "chr", "classmethod",
"cmp", "compile", "complex", "delattr", "dict",
"dir", "divmod", "enumerate", "eval", "execfile",
"file", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "long", "map", "max", "memoryview",
"min", "next", "object", "oct", "open", "ord",
"pow", "print", "property", "range", "raw_input",
"reduce", "reload", "repr", "reversed", "round",
"set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "unichr", "unicode", "vars", "xrange",
"zip"}
return (
isinstance(node, ast.Assign)
and len(builtins & set(h.target_names(node.targets))) > 0
)
@h.labeled(code='M005',
msg='catching a generic exception',
solution="instead of 'except:' use 'except [Specific]:'")
def find_generic_exception(node):
"""Finds generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
)
@h.labeled(code='M006',
msg='catching a generic exception and passing it silently',
solution="instead of 'except: pass' use 'except [Specific]:' "
"and handle it")
def find_silent_exception(node):
"""Finds silent generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
and len(node.body) == 1
and isinstance(node.body[0], ast.Pass)
)
@h.labeled(code='M007',
msg='use of import star',
solution="make explicit imports")
def find_import_star(node):
"""Finds import stars"""
return (
isinstance(node, ast.ImportFrom)
and '*' in h.importfrom_names(node.names)
)
@h.labeled(code='M008',
msg='comparing to True or False',
solution="instead of 'a == True' use 'a' or 'bool(a)'")
@h.labeled(code='M009',
msg='poor choice of default argument',
solution="use `None` as the default arg, and "
"initialize the variable inside the function block")
def find_poor_default_arg(node):
"""Finds poor default args"""
poor_defaults = [
ast.Call,
ast.Dict,
ast.DictComp,
ast.GeneratorExp,
ast.List,
ast.ListComp,
ast.Set,
ast.SetComp,
]
# pylint: disable=unidiomatic-typecheck
return (
isinstance(node, ast.FunctionDef)
and any((n for n in node.args.defaults if type(n) in poor_defaults))
)
# pylint: enable=unidiomatic-typecheck
@h.labeled(code='M010',
msg='use of "if" expression as statement',
solution='use a normal "if" condition instead')
def find_if_expression_as_statement(node):
"""Finds an "if" expression as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.IfExp)
)
@h.labeled(code='M011',
msg='use of a comprehension as statement',
solution='use a loop instead')
def find_comprehension_as_statement(node):
"""Finds a comprehension as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, (ast.ListComp,
ast.DictComp,
ast.SetComp))
)
@h.labeled(code='M012',
msg='use of a generator as statement',
solution='this done nothing!')
def find_generator_as_statement(node):
"""Finds a generator as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.GeneratorExp)
)
|
omaraboumrad/mastool
|
mastool/practices.py
|
find_poor_default_arg
|
python
|
def find_poor_default_arg(node):
poor_defaults = [
ast.Call,
ast.Dict,
ast.DictComp,
ast.GeneratorExp,
ast.List,
ast.ListComp,
ast.Set,
ast.SetComp,
]
# pylint: disable=unidiomatic-typecheck
return (
isinstance(node, ast.FunctionDef)
and any((n for n in node.args.defaults if type(n) in poor_defaults))
)
|
Finds poor default args
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/practices.py#L154-L171
| null |
"""
Practices and Checks listing
"""
import ast
import sys
from mastool import helpers as h
@h.labeled(code='M001',
msg='looping against dictionary keys',
solution="use 'for key in dictionary' instead.")
def find_for_x_in_y_keys(node):
"""Finds looping against dictionary keys"""
return (
isinstance(node, ast.For)
and h.call_name_is(node.iter, 'keys')
)
@h.labeled(code='M002',
msg='simplifiable if condition',
solution="instead of 'if cond: return True else return False' "
"use: 'return cond'")
def find_if_x_retbool_else_retbool(node):
"""Finds simplifiable if condition"""
return (
isinstance(node, ast.If)
and isinstance(node.body[0], ast.Return)
and h.is_boolean(node.body[0].value)
and h.has_else(node)
and isinstance(node.orelse[0], ast.Return)
and h.is_boolean(node.orelse[0].value)
)
@h.labeled(code='M003',
msg='joining path with plus',
solution="instead of: 'p1 + '/' + p2', use 'os.path.join(p1, p2)'")
def find_path_join_using_plus(node):
"""Finds joining path with plus"""
return (
isinstance(node, ast.BinOp)
and isinstance(node.op, ast.Add)
and isinstance(node.left, ast.BinOp)
and isinstance(node.left.op, ast.Add)
and isinstance(node.left.right, ast.Str)
and node.left.right.s in ['/', "\\"]
)
@h.labeled(code='M004',
msg='assigning to built-in',
solution="change symbol name to something else")
def find_assign_to_builtin(node):
"""Finds assigning to built-ins"""
# The list of forbidden builtins is constant and not determined at
# runtime anyomre. The reason behind this change is that certain
# modules (like `gettext` for instance) would mess with the
# builtins module making this practice yield false positives.
if sys.version_info.major == 3:
builtins = {"abs", "all", "any", "ascii", "bin", "bool",
"bytearray", "bytes", "callable", "chr",
"classmethod", "compile", "complex", "delattr",
"dict", "dir", "divmod", "enumerate", "eval",
"exec", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "__import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "map", "max", "memoryview", "min",
"next", "object", "oct", "open", "ord", "pow",
"print", "property", "range", "repr", "reversed",
"round", "set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "vars", "zip"}
else:
builtins = {"abs", "all", "any", "basestring", "bin", "bool",
"bytearray", "callable", "chr", "classmethod",
"cmp", "compile", "complex", "delattr", "dict",
"dir", "divmod", "enumerate", "eval", "execfile",
"file", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "long", "map", "max", "memoryview",
"min", "next", "object", "oct", "open", "ord",
"pow", "print", "property", "range", "raw_input",
"reduce", "reload", "repr", "reversed", "round",
"set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "unichr", "unicode", "vars", "xrange",
"zip"}
return (
isinstance(node, ast.Assign)
and len(builtins & set(h.target_names(node.targets))) > 0
)
@h.labeled(code='M005',
msg='catching a generic exception',
solution="instead of 'except:' use 'except [Specific]:'")
def find_generic_exception(node):
    """Return True when *node* is a bare ``except:`` handler (no exception type)."""
    if not isinstance(node, ast.ExceptHandler):
        return False
    return node.type is None
@h.labeled(code='M006',
msg='catching a generic exception and passing it silently',
solution="instead of 'except: pass' use 'except [Specific]:' "
"and handle it")
def find_silent_exception(node):
    """Return True for a bare ``except:`` handler whose whole body is ``pass``."""
    if not isinstance(node, ast.ExceptHandler) or node.type is not None:
        return False
    body = node.body
    return len(body) == 1 and isinstance(body[0], ast.Pass)
@h.labeled(code='M007',
msg='use of import star',
solution="make explicit imports")
def find_import_star(node):
    """Return True when *node* is a ``from x import *`` statement."""
    if not isinstance(node, ast.ImportFrom):
        return False
    return any(name == '*' for name in h.importfrom_names(node.names))
@h.labeled(code='M008',
msg='comparing to True or False',
solution="instead of 'a == True' use 'a' or 'bool(a)'")
def find_equals_true_or_false(node):
    """Return True for comparisons of the form ``expr == True`` or ``expr == False``."""
    if not isinstance(node, ast.Compare):
        return False
    if len(node.ops) != 1 or not isinstance(node.ops[0], ast.Eq):
        return False
    return any(h.is_boolean(comparand) for comparand in node.comparators)
@h.labeled(code='M009',
msg='poor choice of default argument',
solution="use `None` as the default arg, and "
"initialize the variable inside the function block")
# pylint: enable=unidiomatic-typecheck
@h.labeled(code='M010',
msg='use of "if" expression as statement',
solution='use a normal "if" condition instead')
def find_if_expression_as_statement(node):
    """Return True when a conditional expression is used as a bare statement."""
    if not isinstance(node, ast.Expr):
        return False
    return isinstance(node.value, ast.IfExp)
@h.labeled(code='M011',
msg='use of a comprehension as statement',
solution='use a loop instead')
def find_comprehension_as_statement(node):
    """Return True when a list/dict/set comprehension is used as a bare statement."""
    if not isinstance(node, ast.Expr):
        return False
    comprehension_types = (ast.ListComp, ast.DictComp, ast.SetComp)
    return isinstance(node.value, comprehension_types)
@h.labeled(code='M012',
msg='use of a generator as statement',
solution='this done nothing!')
def find_generator_as_statement(node):
    """Return True when a generator expression is used as a bare statement."""
    if not isinstance(node, ast.Expr):
        return False
    return isinstance(node.value, ast.GeneratorExp)
|
omaraboumrad/mastool
|
mastool/practices.py
|
find_if_expression_as_statement
|
python
|
def find_if_expression_as_statement(node):
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.IfExp)
)
|
Finds an "if" expression as a statement
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/practices.py#L178-L183
| null |
"""
Practices and Checks listing
"""
import ast
import sys
from mastool import helpers as h
@h.labeled(code='M001',
msg='looping against dictionary keys',
solution="use 'for key in dictionary' instead.")
def find_for_x_in_y_keys(node):
"""Finds looping against dictionary keys"""
return (
isinstance(node, ast.For)
and h.call_name_is(node.iter, 'keys')
)
@h.labeled(code='M002',
msg='simplifiable if condition',
solution="instead of 'if cond: return True else return False' "
"use: 'return cond'")
def find_if_x_retbool_else_retbool(node):
"""Finds simplifiable if condition"""
return (
isinstance(node, ast.If)
and isinstance(node.body[0], ast.Return)
and h.is_boolean(node.body[0].value)
and h.has_else(node)
and isinstance(node.orelse[0], ast.Return)
and h.is_boolean(node.orelse[0].value)
)
@h.labeled(code='M003',
msg='joining path with plus',
solution="instead of: 'p1 + '/' + p2', use 'os.path.join(p1, p2)'")
def find_path_join_using_plus(node):
"""Finds joining path with plus"""
return (
isinstance(node, ast.BinOp)
and isinstance(node.op, ast.Add)
and isinstance(node.left, ast.BinOp)
and isinstance(node.left.op, ast.Add)
and isinstance(node.left.right, ast.Str)
and node.left.right.s in ['/', "\\"]
)
@h.labeled(code='M004',
msg='assigning to built-in',
solution="change symbol name to something else")
def find_assign_to_builtin(node):
"""Finds assigning to built-ins"""
# The list of forbidden builtins is constant and not determined at
# runtime anyomre. The reason behind this change is that certain
# modules (like `gettext` for instance) would mess with the
# builtins module making this practice yield false positives.
if sys.version_info.major == 3:
builtins = {"abs", "all", "any", "ascii", "bin", "bool",
"bytearray", "bytes", "callable", "chr",
"classmethod", "compile", "complex", "delattr",
"dict", "dir", "divmod", "enumerate", "eval",
"exec", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "__import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "map", "max", "memoryview", "min",
"next", "object", "oct", "open", "ord", "pow",
"print", "property", "range", "repr", "reversed",
"round", "set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "vars", "zip"}
else:
builtins = {"abs", "all", "any", "basestring", "bin", "bool",
"bytearray", "callable", "chr", "classmethod",
"cmp", "compile", "complex", "delattr", "dict",
"dir", "divmod", "enumerate", "eval", "execfile",
"file", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "long", "map", "max", "memoryview",
"min", "next", "object", "oct", "open", "ord",
"pow", "print", "property", "range", "raw_input",
"reduce", "reload", "repr", "reversed", "round",
"set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "unichr", "unicode", "vars", "xrange",
"zip"}
return (
isinstance(node, ast.Assign)
and len(builtins & set(h.target_names(node.targets))) > 0
)
@h.labeled(code='M005',
msg='catching a generic exception',
solution="instead of 'except:' use 'except [Specific]:'")
def find_generic_exception(node):
"""Finds generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
)
@h.labeled(code='M006',
msg='catching a generic exception and passing it silently',
solution="instead of 'except: pass' use 'except [Specific]:' "
"and handle it")
def find_silent_exception(node):
"""Finds silent generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
and len(node.body) == 1
and isinstance(node.body[0], ast.Pass)
)
@h.labeled(code='M007',
msg='use of import star',
solution="make explicit imports")
def find_import_star(node):
"""Finds import stars"""
return (
isinstance(node, ast.ImportFrom)
and '*' in h.importfrom_names(node.names)
)
@h.labeled(code='M008',
msg='comparing to True or False',
solution="instead of 'a == True' use 'a' or 'bool(a)'")
def find_equals_true_or_false(node):
"""Finds equals true or false"""
return (
isinstance(node, ast.Compare)
and len(node.ops) == 1
and isinstance(node.ops[0], ast.Eq)
and any(h.is_boolean(n) for n in node.comparators)
)
@h.labeled(code='M009',
msg='poor choice of default argument',
solution="use `None` as the default arg, and "
"initialize the variable inside the function block")
def find_poor_default_arg(node):
"""Finds poor default args"""
poor_defaults = [
ast.Call,
ast.Dict,
ast.DictComp,
ast.GeneratorExp,
ast.List,
ast.ListComp,
ast.Set,
ast.SetComp,
]
# pylint: disable=unidiomatic-typecheck
return (
isinstance(node, ast.FunctionDef)
and any((n for n in node.args.defaults if type(n) in poor_defaults))
)
# pylint: enable=unidiomatic-typecheck
@h.labeled(code='M010',
msg='use of "if" expression as statement',
solution='use a normal "if" condition instead')
@h.labeled(code='M011',
msg='use of a comprehension as statement',
solution='use a loop instead')
def find_comprehension_as_statement(node):
"""Finds a comprehension as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, (ast.ListComp,
ast.DictComp,
ast.SetComp))
)
@h.labeled(code='M012',
msg='use of a generator as statement',
solution='this done nothing!')
def find_generator_as_statement(node):
"""Finds a generator as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.GeneratorExp)
)
|
omaraboumrad/mastool
|
mastool/practices.py
|
find_comprehension_as_statement
|
python
|
def find_comprehension_as_statement(node):
return (
isinstance(node, ast.Expr)
and isinstance(node.value, (ast.ListComp,
ast.DictComp,
ast.SetComp))
)
|
Finds a comprehension as a statement
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/practices.py#L189-L196
| null |
"""
Practices and Checks listing
"""
import ast
import sys
from mastool import helpers as h
@h.labeled(code='M001',
msg='looping against dictionary keys',
solution="use 'for key in dictionary' instead.")
def find_for_x_in_y_keys(node):
"""Finds looping against dictionary keys"""
return (
isinstance(node, ast.For)
and h.call_name_is(node.iter, 'keys')
)
@h.labeled(code='M002',
msg='simplifiable if condition',
solution="instead of 'if cond: return True else return False' "
"use: 'return cond'")
def find_if_x_retbool_else_retbool(node):
"""Finds simplifiable if condition"""
return (
isinstance(node, ast.If)
and isinstance(node.body[0], ast.Return)
and h.is_boolean(node.body[0].value)
and h.has_else(node)
and isinstance(node.orelse[0], ast.Return)
and h.is_boolean(node.orelse[0].value)
)
@h.labeled(code='M003',
msg='joining path with plus',
solution="instead of: 'p1 + '/' + p2', use 'os.path.join(p1, p2)'")
def find_path_join_using_plus(node):
"""Finds joining path with plus"""
return (
isinstance(node, ast.BinOp)
and isinstance(node.op, ast.Add)
and isinstance(node.left, ast.BinOp)
and isinstance(node.left.op, ast.Add)
and isinstance(node.left.right, ast.Str)
and node.left.right.s in ['/', "\\"]
)
@h.labeled(code='M004',
msg='assigning to built-in',
solution="change symbol name to something else")
def find_assign_to_builtin(node):
"""Finds assigning to built-ins"""
# The list of forbidden builtins is constant and not determined at
# runtime anyomre. The reason behind this change is that certain
# modules (like `gettext` for instance) would mess with the
# builtins module making this practice yield false positives.
if sys.version_info.major == 3:
builtins = {"abs", "all", "any", "ascii", "bin", "bool",
"bytearray", "bytes", "callable", "chr",
"classmethod", "compile", "complex", "delattr",
"dict", "dir", "divmod", "enumerate", "eval",
"exec", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "__import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "map", "max", "memoryview", "min",
"next", "object", "oct", "open", "ord", "pow",
"print", "property", "range", "repr", "reversed",
"round", "set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "vars", "zip"}
else:
builtins = {"abs", "all", "any", "basestring", "bin", "bool",
"bytearray", "callable", "chr", "classmethod",
"cmp", "compile", "complex", "delattr", "dict",
"dir", "divmod", "enumerate", "eval", "execfile",
"file", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "long", "map", "max", "memoryview",
"min", "next", "object", "oct", "open", "ord",
"pow", "print", "property", "range", "raw_input",
"reduce", "reload", "repr", "reversed", "round",
"set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "unichr", "unicode", "vars", "xrange",
"zip"}
return (
isinstance(node, ast.Assign)
and len(builtins & set(h.target_names(node.targets))) > 0
)
@h.labeled(code='M005',
msg='catching a generic exception',
solution="instead of 'except:' use 'except [Specific]:'")
def find_generic_exception(node):
"""Finds generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
)
@h.labeled(code='M006',
msg='catching a generic exception and passing it silently',
solution="instead of 'except: pass' use 'except [Specific]:' "
"and handle it")
def find_silent_exception(node):
"""Finds silent generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
and len(node.body) == 1
and isinstance(node.body[0], ast.Pass)
)
@h.labeled(code='M007',
msg='use of import star',
solution="make explicit imports")
def find_import_star(node):
"""Finds import stars"""
return (
isinstance(node, ast.ImportFrom)
and '*' in h.importfrom_names(node.names)
)
@h.labeled(code='M008',
msg='comparing to True or False',
solution="instead of 'a == True' use 'a' or 'bool(a)'")
def find_equals_true_or_false(node):
"""Finds equals true or false"""
return (
isinstance(node, ast.Compare)
and len(node.ops) == 1
and isinstance(node.ops[0], ast.Eq)
and any(h.is_boolean(n) for n in node.comparators)
)
@h.labeled(code='M009',
msg='poor choice of default argument',
solution="use `None` as the default arg, and "
"initialize the variable inside the function block")
def find_poor_default_arg(node):
"""Finds poor default args"""
poor_defaults = [
ast.Call,
ast.Dict,
ast.DictComp,
ast.GeneratorExp,
ast.List,
ast.ListComp,
ast.Set,
ast.SetComp,
]
# pylint: disable=unidiomatic-typecheck
return (
isinstance(node, ast.FunctionDef)
and any((n for n in node.args.defaults if type(n) in poor_defaults))
)
# pylint: enable=unidiomatic-typecheck
@h.labeled(code='M010',
msg='use of "if" expression as statement',
solution='use a normal "if" condition instead')
def find_if_expression_as_statement(node):
"""Finds an "if" expression as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.IfExp)
)
@h.labeled(code='M011',
msg='use of a comprehension as statement',
solution='use a loop instead')
@h.labeled(code='M012',
msg='use of a generator as statement',
solution='this done nothing!')
def find_generator_as_statement(node):
"""Finds a generator as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.GeneratorExp)
)
|
omaraboumrad/mastool
|
mastool/practices.py
|
find_generator_as_statement
|
python
|
def find_generator_as_statement(node):
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.GeneratorExp)
)
|
Finds a generator as a statement
|
train
|
https://github.com/omaraboumrad/mastool/blob/0ec566de6717d03c6ec61affe5d1e9ff8d7e6ebd/mastool/practices.py#L202-L207
| null |
"""
Practices and Checks listing
"""
import ast
import sys
from mastool import helpers as h
@h.labeled(code='M001',
msg='looping against dictionary keys',
solution="use 'for key in dictionary' instead.")
def find_for_x_in_y_keys(node):
"""Finds looping against dictionary keys"""
return (
isinstance(node, ast.For)
and h.call_name_is(node.iter, 'keys')
)
@h.labeled(code='M002',
msg='simplifiable if condition',
solution="instead of 'if cond: return True else return False' "
"use: 'return cond'")
def find_if_x_retbool_else_retbool(node):
"""Finds simplifiable if condition"""
return (
isinstance(node, ast.If)
and isinstance(node.body[0], ast.Return)
and h.is_boolean(node.body[0].value)
and h.has_else(node)
and isinstance(node.orelse[0], ast.Return)
and h.is_boolean(node.orelse[0].value)
)
@h.labeled(code='M003',
msg='joining path with plus',
solution="instead of: 'p1 + '/' + p2', use 'os.path.join(p1, p2)'")
def find_path_join_using_plus(node):
"""Finds joining path with plus"""
return (
isinstance(node, ast.BinOp)
and isinstance(node.op, ast.Add)
and isinstance(node.left, ast.BinOp)
and isinstance(node.left.op, ast.Add)
and isinstance(node.left.right, ast.Str)
and node.left.right.s in ['/', "\\"]
)
@h.labeled(code='M004',
msg='assigning to built-in',
solution="change symbol name to something else")
def find_assign_to_builtin(node):
"""Finds assigning to built-ins"""
# The list of forbidden builtins is constant and not determined at
# runtime anyomre. The reason behind this change is that certain
# modules (like `gettext` for instance) would mess with the
# builtins module making this practice yield false positives.
if sys.version_info.major == 3:
builtins = {"abs", "all", "any", "ascii", "bin", "bool",
"bytearray", "bytes", "callable", "chr",
"classmethod", "compile", "complex", "delattr",
"dict", "dir", "divmod", "enumerate", "eval",
"exec", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "__import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "map", "max", "memoryview", "min",
"next", "object", "oct", "open", "ord", "pow",
"print", "property", "range", "repr", "reversed",
"round", "set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "vars", "zip"}
else:
builtins = {"abs", "all", "any", "basestring", "bin", "bool",
"bytearray", "callable", "chr", "classmethod",
"cmp", "compile", "complex", "delattr", "dict",
"dir", "divmod", "enumerate", "eval", "execfile",
"file", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help",
"hex", "id", "import__", "input", "int",
"isinstance", "issubclass", "iter", "len", "list",
"locals", "long", "map", "max", "memoryview",
"min", "next", "object", "oct", "open", "ord",
"pow", "print", "property", "range", "raw_input",
"reduce", "reload", "repr", "reversed", "round",
"set", "setattr", "slice", "sorted",
"staticmethod", "str", "sum", "super", "tuple",
"type", "unichr", "unicode", "vars", "xrange",
"zip"}
return (
isinstance(node, ast.Assign)
and len(builtins & set(h.target_names(node.targets))) > 0
)
@h.labeled(code='M005',
msg='catching a generic exception',
solution="instead of 'except:' use 'except [Specific]:'")
def find_generic_exception(node):
"""Finds generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
)
@h.labeled(code='M006',
msg='catching a generic exception and passing it silently',
solution="instead of 'except: pass' use 'except [Specific]:' "
"and handle it")
def find_silent_exception(node):
"""Finds silent generic exceptions"""
return (
isinstance(node, ast.ExceptHandler)
and node.type is None
and len(node.body) == 1
and isinstance(node.body[0], ast.Pass)
)
@h.labeled(code='M007',
msg='use of import star',
solution="make explicit imports")
def find_import_star(node):
"""Finds import stars"""
return (
isinstance(node, ast.ImportFrom)
and '*' in h.importfrom_names(node.names)
)
@h.labeled(code='M008',
msg='comparing to True or False',
solution="instead of 'a == True' use 'a' or 'bool(a)'")
def find_equals_true_or_false(node):
"""Finds equals true or false"""
return (
isinstance(node, ast.Compare)
and len(node.ops) == 1
and isinstance(node.ops[0], ast.Eq)
and any(h.is_boolean(n) for n in node.comparators)
)
@h.labeled(code='M009',
msg='poor choice of default argument',
solution="use `None` as the default arg, and "
"initialize the variable inside the function block")
def find_poor_default_arg(node):
"""Finds poor default args"""
poor_defaults = [
ast.Call,
ast.Dict,
ast.DictComp,
ast.GeneratorExp,
ast.List,
ast.ListComp,
ast.Set,
ast.SetComp,
]
# pylint: disable=unidiomatic-typecheck
return (
isinstance(node, ast.FunctionDef)
and any((n for n in node.args.defaults if type(n) in poor_defaults))
)
# pylint: enable=unidiomatic-typecheck
@h.labeled(code='M010',
msg='use of "if" expression as statement',
solution='use a normal "if" condition instead')
def find_if_expression_as_statement(node):
"""Finds an "if" expression as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.IfExp)
)
@h.labeled(code='M011',
msg='use of a comprehension as statement',
solution='use a loop instead')
def find_comprehension_as_statement(node):
"""Finds a comprehension as a statement"""
return (
isinstance(node, ast.Expr)
and isinstance(node.value, (ast.ListComp,
ast.DictComp,
ast.SetComp))
)
@h.labeled(code='M012',
msg='use of a generator as statement',
solution='this done nothing!')
|
OTL/jps
|
jps/launcher.py
|
launch_modules_with_names
|
python
|
def launch_modules_with_names(modules_with_names, module_args=None, kill_before_launch=True):
    '''Launch each module's ``main`` function in its own daemon process.

    :param modules_with_names: iterable of ``(module_name, name)`` pairs; the
        module is imported and ``name`` keys the PID file used for cleanup.
    :param module_args: optional mapping from module name to the arguments
        passed to that module's ``main`` (default: no arguments). The former
        mutable-default ``{}`` is replaced by ``None`` to avoid the shared
        mutable default argument pitfall.
    :param kill_before_launch: when True, any process previously launched
        under the same name is killed before starting a new one.
    :returns: list of the started ``multiprocessing.Process`` objects.
    '''
    if module_args is None:
        module_args = {}
    processes = []
    if kill_before_launch:
        for module_name, name in modules_with_names:
            kill_module(name)
    for module_name, name in modules_with_names:
        m = importlib.import_module(module_name)
        args = module_args.get(module_name, {})
        # NOTE(review): ``args`` is a dict here, but ``Process(args=...)``
        # expects a tuple of positional arguments -- confirm whether
        # ``kwargs=args`` was intended; an empty dict behaves like "no
        # arguments" either way.
        p1 = Process(target=m.main, args=args)
        p1.daemon = True
        p1.start()
        processes.append(p1)
        # Record the child's PID so kill_module() can find it later.
        with open(get_launched_module_pid_file(name), 'w') as f:
            f.write('{}'.format(p1.pid))
    return processes
|
launch module.main functions in another process
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/launcher.py#L32-L49
|
[
"def get_launched_module_pid_file(module_name):\n return '{}/{}_jps_launch.pid'.format(tempfile.gettempdir(), module_name)\n",
"def kill_module(module_name):\n pid_file_path = get_launched_module_pid_file(module_name)\n if os.path.exists(pid_file_path):\n with open(pid_file_path, 'r') as f:\n old_pid = int(f.read())\n # try to kill existing process\n try:\n os.kill(old_pid, signal.SIGINT)\n except OSError:\n # do not mind if the process does not exists\n pass\n os.remove(pid_file_path)\n"
] |
from multiprocessing import Process
import getpass
import importlib
import os
import signal
import tempfile
def get_launched_module_pid_file(module_name):
    """Return the path of the PID file used to track a launched module."""
    pid_file_name = module_name + '_jps_launch.pid'
    return '{}/{}'.format(tempfile.gettempdir(), pid_file_name)
def kill_module(module_name):
    """Send SIGINT to a previously launched module and remove its PID file.

    If no PID file exists for *module_name*, nothing was launched and this
    function is a no-op.
    """
    pid_path = get_launched_module_pid_file(module_name)
    if not os.path.exists(pid_path):
        return
    with open(pid_path, 'r') as pid_file:
        recorded_pid = int(pid_file.read())
    try:
        os.kill(recorded_pid, signal.SIGINT)
    except OSError:
        # the process may already be gone; that is fine
        pass
    os.remove(pid_path)
def launch_modules(module_names, module_args={}, kill_before_launch=True):
    """Launch modules under per-user names (module name + current username)."""
    user = getpass.getuser()
    named_modules = [[module, module + user] for module in module_names]
    return launch_modules_with_names(named_modules,
                                     module_args=module_args,
                                     kill_before_launch=kill_before_launch)
|
OTL/jps
|
jps/publisher.py
|
Publisher.publish
|
python
|
def publish(self, payload):
    '''Send *payload* out on this publisher's topic.

    .. note:: Messages published immediately after constructing a Publisher
       can be lost while the underlying socket is still connecting; sleep
       briefly (e.g. ``time.sleep(0.1)``) after construction if you only
       publish once.

    :param payload: data to publish; it does not have to be JSON.
    '''
    serializer = self._serializer
    if serializer is not None:
        payload = serializer(payload)
    # NOTE(review): __init__ stores self._topic via cast_bytes(), so on
    # Python 3 this comparison against the str '*' may never match -- confirm.
    if self._topic == '*':
        # special case: a wildcard publisher sends the payload as-is,
        # because it already carries its own "topic data" framing
        wire_msg = payload
    else:
        wire_msg = '{topic} {data}'.format(topic=self._topic, data=payload)
    self._socket.send(cast_bytes(wire_msg))
|
Publish payload to the topic
.. note:: If you publishes just after creating Publisher instance, it will causes
lost of message. You have to add sleep if you just want to publish once.
>>> pub = jps.Publisher('topic')
>>> time.sleep(0.1)
>>> pub.publish('{data}')
:param payload: data to be published. This is ok if the data is not json.
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/publisher.py#L47-L66
| null |
class Publisher(object):
'''Publishes data for a topic.
Example:
>>> pub = jps.Publisher('special_topic')
>>> pub.publish('{"name": "hoge"}')
:param topic_name: Topic name
:param host: host of subscriber/forwarder
:param pub_port: port of subscriber/forwarder
:param serializer: this function is applied before publish (default: None)
'''
def __init__(self, topic_name, host=None, pub_port=None,
serializer='DEFAULT'):
topic_name = get_remapped_topic_name(topic_name)
if topic_name.count(' '):
raise Error('you can\'t use " " for topic_name')
if topic_name == '':
raise Error('empty topic name is not supported')
if host is None:
host = get_master_host()
if pub_port is None:
pub_port = get_pub_port()
if serializer is 'DEFAULT':
serializer = get_default_serializer()
self._serializer = serializer
context = zmq.Context()
self._socket = context.socket(zmq.PUB)
self._socket.connect(
'tcp://{host}:{port}'.format(host=host, port=pub_port))
self._topic = cast_bytes(topic_name + get_topic_suffix())
|
OTL/jps
|
jps/utils.py
|
JsonMultiplePublisher.publish
|
python
|
def publish(self, json_msg):
    '''Publish several topics at once from a single JSON object.

    json_msg = '{"topic1": 1.0, "topic2": {"x": 0.1}}'

    Each top-level key becomes a topic name and its value is re-serialized
    as the payload published for that topic.
    '''
    decoded = json.loads(json_msg)
    for topic_name, value in decoded.items():
        payload = json.dumps(value)
        self._pub.publish('{topic} {data}'.format(topic=topic_name, data=payload))
|
json_msg = '{"topic1": 1.0, "topic2": {"x": 0.1}}'
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/utils.py#L24-L31
|
[
"def publish(self, payload):\n '''Publish payload to the topic\n\n .. note:: If you publishes just after creating Publisher instance, it will causes\n lost of message. You have to add sleep if you just want to publish once.\n\n >>> pub = jps.Publisher('topic')\n >>> time.sleep(0.1)\n >>> pub.publish('{data}')\n\n :param payload: data to be published. This is ok if the data is not json.\n '''\n if self._serializer is not None:\n payload = self._serializer(payload)\n if self._topic == '*':\n # special case for publish everything\n msg = payload\n else:\n msg = '{topic} {data}'.format(topic=self._topic, data=payload)\n self._socket.send(cast_bytes(msg))\n"
] |
class JsonMultiplePublisher(object):
'''publish multiple topics by one json message
Example:
>>> p = JsonMultiplePublisher()
>>> p.publish('{"topic1": 1.0, "topic2": {"x": 0.1}}')
'''
def __init__(self):
self._pub = Publisher('*')
|
OTL/jps
|
jps/tools.py
|
pub
|
python
|
def pub(topic_name, json_msg, repeat_rate=None, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT):
    '''publishes the data to the topic
    :param topic_name: name of the topic
    :param json_msg: data to be published
    :param repeat_rate: if None, publishes once. if not None, it is used as [Hz].
    '''
    # NOTE(review): the ``host`` and ``pub_port`` defaults are evaluated once
    # at import time, not per call -- confirm that is the intended behavior.
    pub = jps.Publisher(topic_name, host=host, pub_port=pub_port)
    # A freshly created Publisher can lose messages while its socket is still
    # connecting (see jps.Publisher docs), so give it a moment before sending.
    time.sleep(0.1)
    if repeat_rate is None:
        pub.publish(json_msg)
    else:
        # Publish repeatedly at roughly repeat_rate Hz until Ctrl-C.
        try:
            while True:
                pub.publish(json_msg)
                time.sleep(1.0 / repeat_rate)
        except KeyboardInterrupt:
            pass
|
publishes the data to the topic
:param topic_name: name of the topic
:param json_msg: data to be published
:param repeat_rate: if None, publishes once. if not None, it is used as [Hz].
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/tools.py#L11-L28
|
[
"def publish(self, payload):\n '''Publish payload to the topic\n\n .. note:: If you publishes just after creating Publisher instance, it will causes\n lost of message. You have to add sleep if you just want to publish once.\n\n >>> pub = jps.Publisher('topic')\n >>> time.sleep(0.1)\n >>> pub.publish('{data}')\n\n :param payload: data to be published. This is ok if the data is not json.\n '''\n if self._serializer is not None:\n payload = self._serializer(payload)\n if self._topic == '*':\n # special case for publish everything\n msg = payload\n else:\n msg = '{topic} {data}'.format(topic=self._topic, data=payload)\n self._socket.send(cast_bytes(msg))\n"
] |
import argparse
import datetime
import jps
import json
import os
import signal
import sys
import time
def echo(topic_name, num_print=None, out=sys.stdout, host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
    '''Print each message received on the given topic.

    Runs until interrupted with Ctrl-C, or until ``num_print`` messages
    have been printed when ``num_print`` is not None.
    '''
    # Helper that writes every message to ``out`` and counts how many were
    # printed, so the loop below can stop after ``num_print`` messages.
    class PrintWithCount(object):
        def __init__(self, out):
            self._printed = 0
            self._out = out
        def print_and_increment(self, msg):
            self._out.write('{}\n'.format(msg))
            self._printed += 1
        def get_count(self):
            return self._printed
    counter = PrintWithCount(out)
    sub = jps.Subscriber(
        topic_name, counter.print_and_increment, host=host, sub_port=sub_port)
    try:
        # Poll the subscriber; the tiny sleep yields the CPU between polls.
        while num_print is None or counter.get_count() < num_print:
            sub.spin_once()
            time.sleep(0.0001)
    except KeyboardInterrupt:
        pass
def show_list(timeout_in_sec, out=sys.stdout, host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''get the name list of the topics, and print it
'''
class TopicNameStore(object):
def __init__(self):
self._topic_names = set()
def callback(self, msg, topic):
self._topic_names.add(topic)
def get_topic_names(self):
names = list(self._topic_names)
names.sort()
return names
store = TopicNameStore()
sub = jps.Subscriber('*', store.callback, host=host, sub_port=sub_port)
sleep_sec = 0.01
for i in range(int(timeout_in_sec / sleep_sec)):
sub.spin_once(sleep_sec)
time.sleep(0.001) # for context switch
for name in store.get_topic_names():
out.write('{}\n'.format(name))
def record(file_path, topic_names=[], host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''record the topic data to the file
'''
class TopicRecorder(object):
def __init__(self, file_path, topic_names):
self._topic_names = topic_names
self._file_path = file_path
self._output = open(self._file_path, 'w')
signal.signal(signal.SIGINT, self._handle_signal)
signal.signal(signal.SIGTERM, self._handle_signal)
header = {}
header['topic_names'] = topic_names
header['start_date'] = str(datetime.datetime.today())
header_string = json.dumps({'header': header})
tail_removed_header = header_string[0:-1]
self._output.write(tail_removed_header + ',\n')
self._output.write(' "data": [\n')
self._has_no_data = True
def callback(self, msg, topic):
if self._output.closed:
return
raw_msg = '{topic} {msg}'.format(topic=topic, msg=msg)
if not self._topic_names or topic in self._topic_names:
if not self._has_no_data:
self._output.write(',\n')
else:
self._has_no_data = False
self._output.write(json.dumps([time.time(), raw_msg]))
def close(self):
if not self._output.closed:
self._output.write('\n]}')
self._output.close()
def _handle_signal(self, signum, frame):
self.close()
sys.exit(0)
writer = TopicRecorder(file_path, topic_names)
sub = jps.Subscriber('*', writer.callback, host=host, sub_port=sub_port)
sub.spin()
writer.close()
def play(file_path, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT):
'''replay the recorded data by record()
'''
pub = jps.Publisher('*', host=host, pub_port=pub_port)
time.sleep(0.2)
last_time = None
print('start publishing file {}'.format(file_path))
with open(file_path, 'r') as f:
# super hack to remove header
f.readline()
f.readline()
for line in f:
if line.startswith(']}'):
break
publish_time, raw_msg = json.loads(line.rstrip(',\n'))
if last_time is not None:
time.sleep(publish_time - last_time)
pub.publish(raw_msg.rstrip())
last_time = publish_time
print('fnished')
def topic_command():
'''command line tool for jps
'''
parser = argparse.ArgumentParser(description='json pub/sub tool')
pub_common_parser = jps.ArgumentParser(subscriber=False, add_help=False)
sub_common_parser = jps.ArgumentParser(publisher=False, add_help=False)
command_parsers = parser.add_subparsers(dest='command', help='command')
pub_parser = command_parsers.add_parser(
'pub', help='publish topic from command line', parents=[pub_common_parser])
pub_parser.add_argument('topic_name', type=str, help='name of topic')
pub_parser.add_argument(
'data', type=str, help='json string data to be published')
pub_parser.add_argument('--repeat', '-r', help='repeat in hz', type=float)
echo_parser = command_parsers.add_parser(
'echo', help='show topic data', parents=[sub_common_parser])
echo_parser.add_argument('topic_name', type=str, help='name of topic')
echo_parser.add_argument(
'--num', '-n', help='print N times and exit', type=int,
default=None)
list_parser = command_parsers.add_parser(
'list', help='show topic list', parents=[sub_common_parser])
list_parser.add_argument(
'--timeout', '-t', help='timeout in sec', type=float,
default=1.0)
record_parser = command_parsers.add_parser(
'record', help='record topic data', parents=[sub_common_parser])
record_parser.add_argument('topic_names', nargs='*',
help='topic names to be recorded', type=str)
record_parser.add_argument(
'--file', '-f', help='output file name (default: record.json)',
type=str, default='record.json')
play_parser = command_parsers.add_parser(
'play', help='play recorded topic data', parents=[pub_common_parser])
play_parser.add_argument('file', type=str, help='input file name')
args = parser.parse_args()
if args.command == 'pub':
pub(args.topic_name, args.data, repeat_rate=args.repeat,
host=args.host, pub_port=args.publisher_port)
elif args.command == 'echo':
echo(args.topic_name, args.num,
host=args.host, sub_port=args.subscriber_port)
elif args.command == 'list':
show_list(args.timeout, host=args.host, sub_port=args.subscriber_port)
elif args.command == 'record':
record(args.file, args.topic_names,
host=args.host, sub_port=args.subscriber_port)
elif args.command == 'play':
play(args.file, host=args.host, pub_port=args.publisher_port)
else:
parser.print_help()
|
OTL/jps
|
jps/tools.py
|
echo
|
python
|
def echo(topic_name, num_print=None, out=sys.stdout, host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''print the data for the given topic forever
'''
class PrintWithCount(object):
def __init__(self, out):
self._printed = 0
self._out = out
def print_and_increment(self, msg):
self._out.write('{}\n'.format(msg))
self._printed += 1
def get_count(self):
return self._printed
counter = PrintWithCount(out)
sub = jps.Subscriber(
topic_name, counter.print_and_increment, host=host, sub_port=sub_port)
try:
while num_print is None or counter.get_count() < num_print:
sub.spin_once()
time.sleep(0.0001)
except KeyboardInterrupt:
pass
|
print the data for the given topic forever
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/tools.py#L31-L55
|
[
"def spin_once(self, polling_sec=0.010):\n '''Read the queued data and call the callback for them.\n You have to handle KeyboardInterrupt (\\C-c) manually.\n\n Example:\n\n >>> def callback(msg):\n ... print msg\n >>> sub = jps.Subscriber('topic_name', callback)\n >>> try:\n ... while True:\n ... sub.spin_once():\n ... time.sleep(0.1)\n ... except KeyboardInterrupt:\n ... pass\n\n '''\n # parse all data\n while True:\n socks = dict(self._poller.poll(polling_sec * 1000))\n if socks.get(self._socket) == zmq.POLLIN:\n msg = self._socket.recv()\n self._callback(msg)\n else:\n return\n"
] |
import argparse
import datetime
import jps
import json
import os
import signal
import sys
import time
def pub(topic_name, json_msg, repeat_rate=None, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT):
'''publishes the data to the topic
:param topic_name: name of the topic
:param json_msg: data to be published
:param repeat_rate: if None, publishes once. if not None, it is used as [Hz].
'''
pub = jps.Publisher(topic_name, host=host, pub_port=pub_port)
time.sleep(0.1)
if repeat_rate is None:
pub.publish(json_msg)
else:
try:
while True:
pub.publish(json_msg)
time.sleep(1.0 / repeat_rate)
except KeyboardInterrupt:
pass
def show_list(timeout_in_sec, out=sys.stdout, host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''get the name list of the topics, and print it
'''
class TopicNameStore(object):
def __init__(self):
self._topic_names = set()
def callback(self, msg, topic):
self._topic_names.add(topic)
def get_topic_names(self):
names = list(self._topic_names)
names.sort()
return names
store = TopicNameStore()
sub = jps.Subscriber('*', store.callback, host=host, sub_port=sub_port)
sleep_sec = 0.01
for i in range(int(timeout_in_sec / sleep_sec)):
sub.spin_once(sleep_sec)
time.sleep(0.001) # for context switch
for name in store.get_topic_names():
out.write('{}\n'.format(name))
def record(file_path, topic_names=[], host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''record the topic data to the file
'''
class TopicRecorder(object):
def __init__(self, file_path, topic_names):
self._topic_names = topic_names
self._file_path = file_path
self._output = open(self._file_path, 'w')
signal.signal(signal.SIGINT, self._handle_signal)
signal.signal(signal.SIGTERM, self._handle_signal)
header = {}
header['topic_names'] = topic_names
header['start_date'] = str(datetime.datetime.today())
header_string = json.dumps({'header': header})
tail_removed_header = header_string[0:-1]
self._output.write(tail_removed_header + ',\n')
self._output.write(' "data": [\n')
self._has_no_data = True
def callback(self, msg, topic):
if self._output.closed:
return
raw_msg = '{topic} {msg}'.format(topic=topic, msg=msg)
if not self._topic_names or topic in self._topic_names:
if not self._has_no_data:
self._output.write(',\n')
else:
self._has_no_data = False
self._output.write(json.dumps([time.time(), raw_msg]))
def close(self):
if not self._output.closed:
self._output.write('\n]}')
self._output.close()
def _handle_signal(self, signum, frame):
self.close()
sys.exit(0)
writer = TopicRecorder(file_path, topic_names)
sub = jps.Subscriber('*', writer.callback, host=host, sub_port=sub_port)
sub.spin()
writer.close()
def play(file_path, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT):
'''replay the recorded data by record()
'''
pub = jps.Publisher('*', host=host, pub_port=pub_port)
time.sleep(0.2)
last_time = None
print('start publishing file {}'.format(file_path))
with open(file_path, 'r') as f:
# super hack to remove header
f.readline()
f.readline()
for line in f:
if line.startswith(']}'):
break
publish_time, raw_msg = json.loads(line.rstrip(',\n'))
if last_time is not None:
time.sleep(publish_time - last_time)
pub.publish(raw_msg.rstrip())
last_time = publish_time
print('fnished')
def topic_command():
'''command line tool for jps
'''
parser = argparse.ArgumentParser(description='json pub/sub tool')
pub_common_parser = jps.ArgumentParser(subscriber=False, add_help=False)
sub_common_parser = jps.ArgumentParser(publisher=False, add_help=False)
command_parsers = parser.add_subparsers(dest='command', help='command')
pub_parser = command_parsers.add_parser(
'pub', help='publish topic from command line', parents=[pub_common_parser])
pub_parser.add_argument('topic_name', type=str, help='name of topic')
pub_parser.add_argument(
'data', type=str, help='json string data to be published')
pub_parser.add_argument('--repeat', '-r', help='repeat in hz', type=float)
echo_parser = command_parsers.add_parser(
'echo', help='show topic data', parents=[sub_common_parser])
echo_parser.add_argument('topic_name', type=str, help='name of topic')
echo_parser.add_argument(
'--num', '-n', help='print N times and exit', type=int,
default=None)
list_parser = command_parsers.add_parser(
'list', help='show topic list', parents=[sub_common_parser])
list_parser.add_argument(
'--timeout', '-t', help='timeout in sec', type=float,
default=1.0)
record_parser = command_parsers.add_parser(
'record', help='record topic data', parents=[sub_common_parser])
record_parser.add_argument('topic_names', nargs='*',
help='topic names to be recorded', type=str)
record_parser.add_argument(
'--file', '-f', help='output file name (default: record.json)',
type=str, default='record.json')
play_parser = command_parsers.add_parser(
'play', help='play recorded topic data', parents=[pub_common_parser])
play_parser.add_argument('file', type=str, help='input file name')
args = parser.parse_args()
if args.command == 'pub':
pub(args.topic_name, args.data, repeat_rate=args.repeat,
host=args.host, pub_port=args.publisher_port)
elif args.command == 'echo':
echo(args.topic_name, args.num,
host=args.host, sub_port=args.subscriber_port)
elif args.command == 'list':
show_list(args.timeout, host=args.host, sub_port=args.subscriber_port)
elif args.command == 'record':
record(args.file, args.topic_names,
host=args.host, sub_port=args.subscriber_port)
elif args.command == 'play':
play(args.file, host=args.host, pub_port=args.publisher_port)
else:
parser.print_help()
|
OTL/jps
|
jps/tools.py
|
show_list
|
python
|
def show_list(timeout_in_sec, out=sys.stdout, host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''get the name list of the topics, and print it
'''
class TopicNameStore(object):
def __init__(self):
self._topic_names = set()
def callback(self, msg, topic):
self._topic_names.add(topic)
def get_topic_names(self):
names = list(self._topic_names)
names.sort()
return names
store = TopicNameStore()
sub = jps.Subscriber('*', store.callback, host=host, sub_port=sub_port)
sleep_sec = 0.01
for i in range(int(timeout_in_sec / sleep_sec)):
sub.spin_once(sleep_sec)
time.sleep(0.001) # for context switch
for name in store.get_topic_names():
out.write('{}\n'.format(name))
|
get the name list of the topics, and print it
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/tools.py#L58-L81
|
[
"def spin_once(self, polling_sec=0.010):\n '''Read the queued data and call the callback for them.\n You have to handle KeyboardInterrupt (\\C-c) manually.\n\n Example:\n\n >>> def callback(msg):\n ... print msg\n >>> sub = jps.Subscriber('topic_name', callback)\n >>> try:\n ... while True:\n ... sub.spin_once():\n ... time.sleep(0.1)\n ... except KeyboardInterrupt:\n ... pass\n\n '''\n # parse all data\n while True:\n socks = dict(self._poller.poll(polling_sec * 1000))\n if socks.get(self._socket) == zmq.POLLIN:\n msg = self._socket.recv()\n self._callback(msg)\n else:\n return\n"
] |
import argparse
import datetime
import jps
import json
import os
import signal
import sys
import time
def pub(topic_name, json_msg, repeat_rate=None, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT):
'''publishes the data to the topic
:param topic_name: name of the topic
:param json_msg: data to be published
:param repeat_rate: if None, publishes once. if not None, it is used as [Hz].
'''
pub = jps.Publisher(topic_name, host=host, pub_port=pub_port)
time.sleep(0.1)
if repeat_rate is None:
pub.publish(json_msg)
else:
try:
while True:
pub.publish(json_msg)
time.sleep(1.0 / repeat_rate)
except KeyboardInterrupt:
pass
def echo(topic_name, num_print=None, out=sys.stdout, host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''print the data for the given topic forever
'''
class PrintWithCount(object):
def __init__(self, out):
self._printed = 0
self._out = out
def print_and_increment(self, msg):
self._out.write('{}\n'.format(msg))
self._printed += 1
def get_count(self):
return self._printed
counter = PrintWithCount(out)
sub = jps.Subscriber(
topic_name, counter.print_and_increment, host=host, sub_port=sub_port)
try:
while num_print is None or counter.get_count() < num_print:
sub.spin_once()
time.sleep(0.0001)
except KeyboardInterrupt:
pass
def record(file_path, topic_names=[], host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''record the topic data to the file
'''
class TopicRecorder(object):
def __init__(self, file_path, topic_names):
self._topic_names = topic_names
self._file_path = file_path
self._output = open(self._file_path, 'w')
signal.signal(signal.SIGINT, self._handle_signal)
signal.signal(signal.SIGTERM, self._handle_signal)
header = {}
header['topic_names'] = topic_names
header['start_date'] = str(datetime.datetime.today())
header_string = json.dumps({'header': header})
tail_removed_header = header_string[0:-1]
self._output.write(tail_removed_header + ',\n')
self._output.write(' "data": [\n')
self._has_no_data = True
def callback(self, msg, topic):
if self._output.closed:
return
raw_msg = '{topic} {msg}'.format(topic=topic, msg=msg)
if not self._topic_names or topic in self._topic_names:
if not self._has_no_data:
self._output.write(',\n')
else:
self._has_no_data = False
self._output.write(json.dumps([time.time(), raw_msg]))
def close(self):
if not self._output.closed:
self._output.write('\n]}')
self._output.close()
def _handle_signal(self, signum, frame):
self.close()
sys.exit(0)
writer = TopicRecorder(file_path, topic_names)
sub = jps.Subscriber('*', writer.callback, host=host, sub_port=sub_port)
sub.spin()
writer.close()
def play(file_path, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT):
'''replay the recorded data by record()
'''
pub = jps.Publisher('*', host=host, pub_port=pub_port)
time.sleep(0.2)
last_time = None
print('start publishing file {}'.format(file_path))
with open(file_path, 'r') as f:
# super hack to remove header
f.readline()
f.readline()
for line in f:
if line.startswith(']}'):
break
publish_time, raw_msg = json.loads(line.rstrip(',\n'))
if last_time is not None:
time.sleep(publish_time - last_time)
pub.publish(raw_msg.rstrip())
last_time = publish_time
print('fnished')
def topic_command():
'''command line tool for jps
'''
parser = argparse.ArgumentParser(description='json pub/sub tool')
pub_common_parser = jps.ArgumentParser(subscriber=False, add_help=False)
sub_common_parser = jps.ArgumentParser(publisher=False, add_help=False)
command_parsers = parser.add_subparsers(dest='command', help='command')
pub_parser = command_parsers.add_parser(
'pub', help='publish topic from command line', parents=[pub_common_parser])
pub_parser.add_argument('topic_name', type=str, help='name of topic')
pub_parser.add_argument(
'data', type=str, help='json string data to be published')
pub_parser.add_argument('--repeat', '-r', help='repeat in hz', type=float)
echo_parser = command_parsers.add_parser(
'echo', help='show topic data', parents=[sub_common_parser])
echo_parser.add_argument('topic_name', type=str, help='name of topic')
echo_parser.add_argument(
'--num', '-n', help='print N times and exit', type=int,
default=None)
list_parser = command_parsers.add_parser(
'list', help='show topic list', parents=[sub_common_parser])
list_parser.add_argument(
'--timeout', '-t', help='timeout in sec', type=float,
default=1.0)
record_parser = command_parsers.add_parser(
'record', help='record topic data', parents=[sub_common_parser])
record_parser.add_argument('topic_names', nargs='*',
help='topic names to be recorded', type=str)
record_parser.add_argument(
'--file', '-f', help='output file name (default: record.json)',
type=str, default='record.json')
play_parser = command_parsers.add_parser(
'play', help='play recorded topic data', parents=[pub_common_parser])
play_parser.add_argument('file', type=str, help='input file name')
args = parser.parse_args()
if args.command == 'pub':
pub(args.topic_name, args.data, repeat_rate=args.repeat,
host=args.host, pub_port=args.publisher_port)
elif args.command == 'echo':
echo(args.topic_name, args.num,
host=args.host, sub_port=args.subscriber_port)
elif args.command == 'list':
show_list(args.timeout, host=args.host, sub_port=args.subscriber_port)
elif args.command == 'record':
record(args.file, args.topic_names,
host=args.host, sub_port=args.subscriber_port)
elif args.command == 'play':
play(args.file, host=args.host, pub_port=args.publisher_port)
else:
parser.print_help()
|
OTL/jps
|
jps/tools.py
|
record
|
python
|
def record(file_path, topic_names=[], host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''record the topic data to the file
'''
class TopicRecorder(object):
def __init__(self, file_path, topic_names):
self._topic_names = topic_names
self._file_path = file_path
self._output = open(self._file_path, 'w')
signal.signal(signal.SIGINT, self._handle_signal)
signal.signal(signal.SIGTERM, self._handle_signal)
header = {}
header['topic_names'] = topic_names
header['start_date'] = str(datetime.datetime.today())
header_string = json.dumps({'header': header})
tail_removed_header = header_string[0:-1]
self._output.write(tail_removed_header + ',\n')
self._output.write(' "data": [\n')
self._has_no_data = True
def callback(self, msg, topic):
if self._output.closed:
return
raw_msg = '{topic} {msg}'.format(topic=topic, msg=msg)
if not self._topic_names or topic in self._topic_names:
if not self._has_no_data:
self._output.write(',\n')
else:
self._has_no_data = False
self._output.write(json.dumps([time.time(), raw_msg]))
def close(self):
if not self._output.closed:
self._output.write('\n]}')
self._output.close()
def _handle_signal(self, signum, frame):
self.close()
sys.exit(0)
writer = TopicRecorder(file_path, topic_names)
sub = jps.Subscriber('*', writer.callback, host=host, sub_port=sub_port)
sub.spin()
writer.close()
|
record the topic data to the file
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/tools.py#L84-L127
|
[
"def spin(self, use_thread=False):\n '''call callback for all data forever (until \\C-c)\n\n :param use_thread: use thread for spin (do not block)\n '''\n if use_thread:\n if self._thread is not None:\n raise Error('spin called twice')\n self._thread = threading.Thread(target=self._spin_internal)\n self._thread.setDaemon(True)\n self._thread.start()\n else:\n self._spin_internal()\n"
] |
import argparse
import datetime
import jps
import json
import os
import signal
import sys
import time
def pub(topic_name, json_msg, repeat_rate=None, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT):
'''publishes the data to the topic
:param topic_name: name of the topic
:param json_msg: data to be published
:param repeat_rate: if None, publishes once. if not None, it is used as [Hz].
'''
pub = jps.Publisher(topic_name, host=host, pub_port=pub_port)
time.sleep(0.1)
if repeat_rate is None:
pub.publish(json_msg)
else:
try:
while True:
pub.publish(json_msg)
time.sleep(1.0 / repeat_rate)
except KeyboardInterrupt:
pass
def echo(topic_name, num_print=None, out=sys.stdout, host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''print the data for the given topic forever
'''
class PrintWithCount(object):
def __init__(self, out):
self._printed = 0
self._out = out
def print_and_increment(self, msg):
self._out.write('{}\n'.format(msg))
self._printed += 1
def get_count(self):
return self._printed
counter = PrintWithCount(out)
sub = jps.Subscriber(
topic_name, counter.print_and_increment, host=host, sub_port=sub_port)
try:
while num_print is None or counter.get_count() < num_print:
sub.spin_once()
time.sleep(0.0001)
except KeyboardInterrupt:
pass
def show_list(timeout_in_sec, out=sys.stdout, host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''get the name list of the topics, and print it
'''
class TopicNameStore(object):
def __init__(self):
self._topic_names = set()
def callback(self, msg, topic):
self._topic_names.add(topic)
def get_topic_names(self):
names = list(self._topic_names)
names.sort()
return names
store = TopicNameStore()
sub = jps.Subscriber('*', store.callback, host=host, sub_port=sub_port)
sleep_sec = 0.01
for i in range(int(timeout_in_sec / sleep_sec)):
sub.spin_once(sleep_sec)
time.sleep(0.001) # for context switch
for name in store.get_topic_names():
out.write('{}\n'.format(name))
def play(file_path, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT):
'''replay the recorded data by record()
'''
pub = jps.Publisher('*', host=host, pub_port=pub_port)
time.sleep(0.2)
last_time = None
print('start publishing file {}'.format(file_path))
with open(file_path, 'r') as f:
# super hack to remove header
f.readline()
f.readline()
for line in f:
if line.startswith(']}'):
break
publish_time, raw_msg = json.loads(line.rstrip(',\n'))
if last_time is not None:
time.sleep(publish_time - last_time)
pub.publish(raw_msg.rstrip())
last_time = publish_time
print('fnished')
def topic_command():
'''command line tool for jps
'''
parser = argparse.ArgumentParser(description='json pub/sub tool')
pub_common_parser = jps.ArgumentParser(subscriber=False, add_help=False)
sub_common_parser = jps.ArgumentParser(publisher=False, add_help=False)
command_parsers = parser.add_subparsers(dest='command', help='command')
pub_parser = command_parsers.add_parser(
'pub', help='publish topic from command line', parents=[pub_common_parser])
pub_parser.add_argument('topic_name', type=str, help='name of topic')
pub_parser.add_argument(
'data', type=str, help='json string data to be published')
pub_parser.add_argument('--repeat', '-r', help='repeat in hz', type=float)
echo_parser = command_parsers.add_parser(
'echo', help='show topic data', parents=[sub_common_parser])
echo_parser.add_argument('topic_name', type=str, help='name of topic')
echo_parser.add_argument(
'--num', '-n', help='print N times and exit', type=int,
default=None)
list_parser = command_parsers.add_parser(
'list', help='show topic list', parents=[sub_common_parser])
list_parser.add_argument(
'--timeout', '-t', help='timeout in sec', type=float,
default=1.0)
record_parser = command_parsers.add_parser(
'record', help='record topic data', parents=[sub_common_parser])
record_parser.add_argument('topic_names', nargs='*',
help='topic names to be recorded', type=str)
record_parser.add_argument(
'--file', '-f', help='output file name (default: record.json)',
type=str, default='record.json')
play_parser = command_parsers.add_parser(
'play', help='play recorded topic data', parents=[pub_common_parser])
play_parser.add_argument('file', type=str, help='input file name')
args = parser.parse_args()
if args.command == 'pub':
pub(args.topic_name, args.data, repeat_rate=args.repeat,
host=args.host, pub_port=args.publisher_port)
elif args.command == 'echo':
echo(args.topic_name, args.num,
host=args.host, sub_port=args.subscriber_port)
elif args.command == 'list':
show_list(args.timeout, host=args.host, sub_port=args.subscriber_port)
elif args.command == 'record':
record(args.file, args.topic_names,
host=args.host, sub_port=args.subscriber_port)
elif args.command == 'play':
play(args.file, host=args.host, pub_port=args.publisher_port)
else:
parser.print_help()
|
OTL/jps
|
jps/tools.py
|
play
|
python
|
def play(file_path, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT):
'''replay the recorded data by record()
'''
pub = jps.Publisher('*', host=host, pub_port=pub_port)
time.sleep(0.2)
last_time = None
print('start publishing file {}'.format(file_path))
with open(file_path, 'r') as f:
# super hack to remove header
f.readline()
f.readline()
for line in f:
if line.startswith(']}'):
break
publish_time, raw_msg = json.loads(line.rstrip(',\n'))
if last_time is not None:
time.sleep(publish_time - last_time)
pub.publish(raw_msg.rstrip())
last_time = publish_time
print('fnished')
|
replay the recorded data by record()
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/tools.py#L130-L149
| null |
import argparse
import datetime
import jps
import json
import os
import signal
import sys
import time
def pub(topic_name, json_msg, repeat_rate=None, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT):
'''publishes the data to the topic
:param topic_name: name of the topic
:param json_msg: data to be published
:param repeat_rate: if None, publishes once. if not None, it is used as [Hz].
'''
pub = jps.Publisher(topic_name, host=host, pub_port=pub_port)
time.sleep(0.1)
if repeat_rate is None:
pub.publish(json_msg)
else:
try:
while True:
pub.publish(json_msg)
time.sleep(1.0 / repeat_rate)
except KeyboardInterrupt:
pass
def echo(topic_name, num_print=None, out=sys.stdout, host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''print the data for the given topic forever
'''
class PrintWithCount(object):
def __init__(self, out):
self._printed = 0
self._out = out
def print_and_increment(self, msg):
self._out.write('{}\n'.format(msg))
self._printed += 1
def get_count(self):
return self._printed
counter = PrintWithCount(out)
sub = jps.Subscriber(
topic_name, counter.print_and_increment, host=host, sub_port=sub_port)
try:
while num_print is None or counter.get_count() < num_print:
sub.spin_once()
time.sleep(0.0001)
except KeyboardInterrupt:
pass
def show_list(timeout_in_sec, out=sys.stdout, host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''get the name list of the topics, and print it
'''
class TopicNameStore(object):
def __init__(self):
self._topic_names = set()
def callback(self, msg, topic):
self._topic_names.add(topic)
def get_topic_names(self):
names = list(self._topic_names)
names.sort()
return names
store = TopicNameStore()
sub = jps.Subscriber('*', store.callback, host=host, sub_port=sub_port)
sleep_sec = 0.01
for i in range(int(timeout_in_sec / sleep_sec)):
sub.spin_once(sleep_sec)
time.sleep(0.001) # for context switch
for name in store.get_topic_names():
out.write('{}\n'.format(name))
def record(file_path, topic_names=[], host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''record the topic data to the file
'''
class TopicRecorder(object):
def __init__(self, file_path, topic_names):
self._topic_names = topic_names
self._file_path = file_path
self._output = open(self._file_path, 'w')
signal.signal(signal.SIGINT, self._handle_signal)
signal.signal(signal.SIGTERM, self._handle_signal)
header = {}
header['topic_names'] = topic_names
header['start_date'] = str(datetime.datetime.today())
header_string = json.dumps({'header': header})
tail_removed_header = header_string[0:-1]
self._output.write(tail_removed_header + ',\n')
self._output.write(' "data": [\n')
self._has_no_data = True
def callback(self, msg, topic):
if self._output.closed:
return
raw_msg = '{topic} {msg}'.format(topic=topic, msg=msg)
if not self._topic_names or topic in self._topic_names:
if not self._has_no_data:
self._output.write(',\n')
else:
self._has_no_data = False
self._output.write(json.dumps([time.time(), raw_msg]))
def close(self):
if not self._output.closed:
self._output.write('\n]}')
self._output.close()
def _handle_signal(self, signum, frame):
self.close()
sys.exit(0)
writer = TopicRecorder(file_path, topic_names)
sub = jps.Subscriber('*', writer.callback, host=host, sub_port=sub_port)
sub.spin()
writer.close()
def topic_command():
'''command line tool for jps
'''
parser = argparse.ArgumentParser(description='json pub/sub tool')
pub_common_parser = jps.ArgumentParser(subscriber=False, add_help=False)
sub_common_parser = jps.ArgumentParser(publisher=False, add_help=False)
command_parsers = parser.add_subparsers(dest='command', help='command')
pub_parser = command_parsers.add_parser(
'pub', help='publish topic from command line', parents=[pub_common_parser])
pub_parser.add_argument('topic_name', type=str, help='name of topic')
pub_parser.add_argument(
'data', type=str, help='json string data to be published')
pub_parser.add_argument('--repeat', '-r', help='repeat in hz', type=float)
echo_parser = command_parsers.add_parser(
'echo', help='show topic data', parents=[sub_common_parser])
echo_parser.add_argument('topic_name', type=str, help='name of topic')
echo_parser.add_argument(
'--num', '-n', help='print N times and exit', type=int,
default=None)
list_parser = command_parsers.add_parser(
'list', help='show topic list', parents=[sub_common_parser])
list_parser.add_argument(
'--timeout', '-t', help='timeout in sec', type=float,
default=1.0)
record_parser = command_parsers.add_parser(
'record', help='record topic data', parents=[sub_common_parser])
record_parser.add_argument('topic_names', nargs='*',
help='topic names to be recorded', type=str)
record_parser.add_argument(
'--file', '-f', help='output file name (default: record.json)',
type=str, default='record.json')
play_parser = command_parsers.add_parser(
'play', help='play recorded topic data', parents=[pub_common_parser])
play_parser.add_argument('file', type=str, help='input file name')
args = parser.parse_args()
if args.command == 'pub':
pub(args.topic_name, args.data, repeat_rate=args.repeat,
host=args.host, pub_port=args.publisher_port)
elif args.command == 'echo':
echo(args.topic_name, args.num,
host=args.host, sub_port=args.subscriber_port)
elif args.command == 'list':
show_list(args.timeout, host=args.host, sub_port=args.subscriber_port)
elif args.command == 'record':
record(args.file, args.topic_names,
host=args.host, sub_port=args.subscriber_port)
elif args.command == 'play':
play(args.file, host=args.host, pub_port=args.publisher_port)
else:
parser.print_help()
|
OTL/jps
|
jps/tools.py
|
topic_command
|
python
|
def topic_command():
'''command line tool for jps
'''
parser = argparse.ArgumentParser(description='json pub/sub tool')
pub_common_parser = jps.ArgumentParser(subscriber=False, add_help=False)
sub_common_parser = jps.ArgumentParser(publisher=False, add_help=False)
command_parsers = parser.add_subparsers(dest='command', help='command')
pub_parser = command_parsers.add_parser(
'pub', help='publish topic from command line', parents=[pub_common_parser])
pub_parser.add_argument('topic_name', type=str, help='name of topic')
pub_parser.add_argument(
'data', type=str, help='json string data to be published')
pub_parser.add_argument('--repeat', '-r', help='repeat in hz', type=float)
echo_parser = command_parsers.add_parser(
'echo', help='show topic data', parents=[sub_common_parser])
echo_parser.add_argument('topic_name', type=str, help='name of topic')
echo_parser.add_argument(
'--num', '-n', help='print N times and exit', type=int,
default=None)
list_parser = command_parsers.add_parser(
'list', help='show topic list', parents=[sub_common_parser])
list_parser.add_argument(
'--timeout', '-t', help='timeout in sec', type=float,
default=1.0)
record_parser = command_parsers.add_parser(
'record', help='record topic data', parents=[sub_common_parser])
record_parser.add_argument('topic_names', nargs='*',
help='topic names to be recorded', type=str)
record_parser.add_argument(
'--file', '-f', help='output file name (default: record.json)',
type=str, default='record.json')
play_parser = command_parsers.add_parser(
'play', help='play recorded topic data', parents=[pub_common_parser])
play_parser.add_argument('file', type=str, help='input file name')
args = parser.parse_args()
if args.command == 'pub':
pub(args.topic_name, args.data, repeat_rate=args.repeat,
host=args.host, pub_port=args.publisher_port)
elif args.command == 'echo':
echo(args.topic_name, args.num,
host=args.host, sub_port=args.subscriber_port)
elif args.command == 'list':
show_list(args.timeout, host=args.host, sub_port=args.subscriber_port)
elif args.command == 'record':
record(args.file, args.topic_names,
host=args.host, sub_port=args.subscriber_port)
elif args.command == 'play':
play(args.file, host=args.host, pub_port=args.publisher_port)
else:
parser.print_help()
|
command line tool for jps
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/tools.py#L152-L208
|
[
"def record(file_path, topic_names=[], host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):\n '''record the topic data to the file\n '''\n class TopicRecorder(object):\n\n def __init__(self, file_path, topic_names):\n self._topic_names = topic_names\n self._file_path = file_path\n self._output = open(self._file_path, 'w')\n signal.signal(signal.SIGINT, self._handle_signal)\n signal.signal(signal.SIGTERM, self._handle_signal)\n header = {}\n header['topic_names'] = topic_names\n header['start_date'] = str(datetime.datetime.today())\n header_string = json.dumps({'header': header})\n tail_removed_header = header_string[0:-1]\n self._output.write(tail_removed_header + ',\\n')\n self._output.write(' \"data\": [\\n')\n self._has_no_data = True\n\n def callback(self, msg, topic):\n if self._output.closed:\n return\n raw_msg = '{topic} {msg}'.format(topic=topic, msg=msg)\n if not self._topic_names or topic in self._topic_names:\n if not self._has_no_data:\n self._output.write(',\\n')\n else:\n self._has_no_data = False\n self._output.write(json.dumps([time.time(), raw_msg]))\n\n def close(self):\n if not self._output.closed:\n self._output.write('\\n]}')\n self._output.close()\n\n def _handle_signal(self, signum, frame):\n self.close()\n sys.exit(0)\n\n writer = TopicRecorder(file_path, topic_names)\n sub = jps.Subscriber('*', writer.callback, host=host, sub_port=sub_port)\n sub.spin()\n writer.close()\n",
"def echo(topic_name, num_print=None, out=sys.stdout, host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):\n '''print the data for the given topic forever\n '''\n class PrintWithCount(object):\n\n def __init__(self, out):\n self._printed = 0\n self._out = out\n\n def print_and_increment(self, msg):\n self._out.write('{}\\n'.format(msg))\n self._printed += 1\n\n def get_count(self):\n return self._printed\n\n counter = PrintWithCount(out)\n sub = jps.Subscriber(\n topic_name, counter.print_and_increment, host=host, sub_port=sub_port)\n try:\n while num_print is None or counter.get_count() < num_print:\n sub.spin_once()\n time.sleep(0.0001)\n except KeyboardInterrupt:\n pass\n",
"def pub(topic_name, json_msg, repeat_rate=None, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT):\n '''publishes the data to the topic\n\n :param topic_name: name of the topic\n :param json_msg: data to be published\n :param repeat_rate: if None, publishes once. if not None, it is used as [Hz].\n '''\n pub = jps.Publisher(topic_name, host=host, pub_port=pub_port)\n time.sleep(0.1)\n if repeat_rate is None:\n pub.publish(json_msg)\n else:\n try:\n while True:\n pub.publish(json_msg)\n time.sleep(1.0 / repeat_rate)\n except KeyboardInterrupt:\n pass\n",
"def show_list(timeout_in_sec, out=sys.stdout, host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):\n '''get the name list of the topics, and print it\n '''\n class TopicNameStore(object):\n\n def __init__(self):\n self._topic_names = set()\n\n def callback(self, msg, topic):\n self._topic_names.add(topic)\n\n def get_topic_names(self):\n names = list(self._topic_names)\n names.sort()\n return names\n\n store = TopicNameStore()\n sub = jps.Subscriber('*', store.callback, host=host, sub_port=sub_port)\n sleep_sec = 0.01\n for i in range(int(timeout_in_sec / sleep_sec)):\n sub.spin_once(sleep_sec)\n time.sleep(0.001) # for context switch\n for name in store.get_topic_names():\n out.write('{}\\n'.format(name))\n",
"def play(file_path, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT):\n '''replay the recorded data by record()\n '''\n pub = jps.Publisher('*', host=host, pub_port=pub_port)\n time.sleep(0.2)\n last_time = None\n print('start publishing file {}'.format(file_path))\n with open(file_path, 'r') as f:\n # super hack to remove header\n f.readline()\n f.readline()\n for line in f:\n if line.startswith(']}'):\n break\n publish_time, raw_msg = json.loads(line.rstrip(',\\n'))\n if last_time is not None:\n time.sleep(publish_time - last_time)\n pub.publish(raw_msg.rstrip())\n last_time = publish_time\n print('fnished')\n"
] |
import argparse
import datetime
import jps
import json
import os
import signal
import sys
import time
def pub(topic_name, json_msg, repeat_rate=None, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT):
'''publishes the data to the topic
:param topic_name: name of the topic
:param json_msg: data to be published
:param repeat_rate: if None, publishes once. if not None, it is used as [Hz].
'''
pub = jps.Publisher(topic_name, host=host, pub_port=pub_port)
time.sleep(0.1)
if repeat_rate is None:
pub.publish(json_msg)
else:
try:
while True:
pub.publish(json_msg)
time.sleep(1.0 / repeat_rate)
except KeyboardInterrupt:
pass
def echo(topic_name, num_print=None, out=sys.stdout, host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''print the data for the given topic forever
'''
class PrintWithCount(object):
def __init__(self, out):
self._printed = 0
self._out = out
def print_and_increment(self, msg):
self._out.write('{}\n'.format(msg))
self._printed += 1
def get_count(self):
return self._printed
counter = PrintWithCount(out)
sub = jps.Subscriber(
topic_name, counter.print_and_increment, host=host, sub_port=sub_port)
try:
while num_print is None or counter.get_count() < num_print:
sub.spin_once()
time.sleep(0.0001)
except KeyboardInterrupt:
pass
def show_list(timeout_in_sec, out=sys.stdout, host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''get the name list of the topics, and print it
'''
class TopicNameStore(object):
def __init__(self):
self._topic_names = set()
def callback(self, msg, topic):
self._topic_names.add(topic)
def get_topic_names(self):
names = list(self._topic_names)
names.sort()
return names
store = TopicNameStore()
sub = jps.Subscriber('*', store.callback, host=host, sub_port=sub_port)
sleep_sec = 0.01
for i in range(int(timeout_in_sec / sleep_sec)):
sub.spin_once(sleep_sec)
time.sleep(0.001) # for context switch
for name in store.get_topic_names():
out.write('{}\n'.format(name))
def record(file_path, topic_names=[], host=jps.env.get_master_host(), sub_port=jps.DEFAULT_SUB_PORT):
'''record the topic data to the file
'''
class TopicRecorder(object):
def __init__(self, file_path, topic_names):
self._topic_names = topic_names
self._file_path = file_path
self._output = open(self._file_path, 'w')
signal.signal(signal.SIGINT, self._handle_signal)
signal.signal(signal.SIGTERM, self._handle_signal)
header = {}
header['topic_names'] = topic_names
header['start_date'] = str(datetime.datetime.today())
header_string = json.dumps({'header': header})
tail_removed_header = header_string[0:-1]
self._output.write(tail_removed_header + ',\n')
self._output.write(' "data": [\n')
self._has_no_data = True
def callback(self, msg, topic):
if self._output.closed:
return
raw_msg = '{topic} {msg}'.format(topic=topic, msg=msg)
if not self._topic_names or topic in self._topic_names:
if not self._has_no_data:
self._output.write(',\n')
else:
self._has_no_data = False
self._output.write(json.dumps([time.time(), raw_msg]))
def close(self):
if not self._output.closed:
self._output.write('\n]}')
self._output.close()
def _handle_signal(self, signum, frame):
self.close()
sys.exit(0)
writer = TopicRecorder(file_path, topic_names)
sub = jps.Subscriber('*', writer.callback, host=host, sub_port=sub_port)
sub.spin()
writer.close()
def play(file_path, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT):
'''replay the recorded data by record()
'''
pub = jps.Publisher('*', host=host, pub_port=pub_port)
time.sleep(0.2)
last_time = None
print('start publishing file {}'.format(file_path))
with open(file_path, 'r') as f:
# super hack to remove header
f.readline()
f.readline()
for line in f:
if line.startswith(']}'):
break
publish_time, raw_msg = json.loads(line.rstrip(',\n'))
if last_time is not None:
time.sleep(publish_time - last_time)
pub.publish(raw_msg.rstrip())
last_time = publish_time
print('fnished')
|
OTL/jps
|
jps/queue.py
|
main
|
python
|
def main(req_port=None, res_port=None, use_security=False):
'''main of queue
:param req_port: port for clients
:param res_port: port for servers
'''
if req_port is None:
req_port = env.get_req_port()
if res_port is None:
res_port = env.get_res_port()
auth = None
try:
context = zmq.Context()
frontend_service = context.socket(zmq.XREP)
backend_service = context.socket(zmq.XREQ)
if use_security:
if not os.path.exists(env.get_server_public_key_dir()):
create_certificates(env.get_server_public_key_dir())
auth = Authenticator.instance(env.get_server_public_key_dir())
auth.set_server_key(
frontend_service, env.get_server_secret_key_path())
auth.set_client_key(backend_service, env.get_client_secret_key_path(),
env.get_server_public_key_path())
frontend_service.bind('tcp://*:{req_port}'.format(req_port=req_port))
backend_service.bind('tcp://*:{res_port}'.format(res_port=res_port))
zmq.device(zmq.QUEUE, frontend_service, backend_service)
except KeyboardInterrupt:
pass
finally:
frontend_service.close()
backend_service.close()
context.term()
if use_security and auth is not None:
auth.stop()
|
main of queue
:param req_port: port for clients
:param res_port: port for servers
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/queue.py#L18-L51
|
[
"def create_certificates(keys_dir='certificates'):\n if not os.path.exists(keys_dir):\n os.mkdir(keys_dir)\n server_public_file, server_secret_file = zmq.auth.create_certificates(\n keys_dir, \"server\")\n client_public_file, client_secret_file = zmq.auth.create_certificates(\n keys_dir, \"client\")\n",
"def get_res_port():\n return os.environ.get('JPS_MASTER_RES_PORT', DEFAULT_RES_PORT)\n",
"def get_req_port():\n return os.environ.get('JPS_MASTER_REQ_PORT', DEFAULT_REQ_PORT)\n",
"def get_server_public_key_dir():\n return os.environ.get('JPS_SERVER_PUBLIC_KEY_DIR',\n 'certificates')\n",
"def instance(cls, public_keys_dir):\n '''Please avoid create multi instance'''\n if public_keys_dir in cls._authenticators:\n return cls._authenticators[public_keys_dir]\n new_instance = cls(public_keys_dir)\n cls._authenticators[public_keys_dir] = new_instance\n return new_instance\n"
] |
import os
import zmq
from .args import ArgumentParser
from . import env
from .security import Authenticator
from .security import create_certificates
def command():
parser = ArgumentParser(description='jps queue',
service=True, publisher=False, subscriber=False)
args = parser.parse_args()
main(args.request_port, args.response_port)
if __name__ == "__main__":
main()
|
OTL/jps
|
jps/forwarder.py
|
main
|
python
|
def main(pub_port=None, sub_port=None):
'''main of forwarder
:param sub_port: port for subscribers
:param pub_port: port for publishers
'''
try:
if sub_port is None:
sub_port = get_sub_port()
if pub_port is None:
pub_port = get_pub_port()
context = zmq.Context(1)
frontend = context.socket(zmq.SUB)
backend = context.socket(zmq.PUB)
frontend.bind('tcp://*:{pub_port}'.format(pub_port=pub_port))
frontend.setsockopt(zmq.SUBSCRIBE, b'')
backend.bind('tcp://*:{sub_port}'.format(sub_port=sub_port))
zmq.device(zmq.FORWARDER, frontend, backend)
except KeyboardInterrupt:
pass
finally:
frontend.close()
backend.close()
context.term()
|
main of forwarder
:param sub_port: port for subscribers
:param pub_port: port for publishers
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/forwarder.py#L13-L37
|
[
"def get_pub_port():\n return os.environ.get('JPS_MASTER_PUB_PORT', DEFAULT_PUB_PORT)\n",
"def get_sub_port():\n return os.environ.get('JPS_MASTER_SUB_PORT', DEFAULT_SUB_PORT)\n"
] |
import zmq
from .args import ArgumentParser
from .env import get_pub_port
from .env import get_sub_port
def command():
parser = ArgumentParser(description='jps forwarder')
args = parser.parse_args()
main(args.publisher_port, args.subscriber_port)
if __name__ == "__main__":
main()
|
OTL/jps
|
jps/service.py
|
ServiceServer.spin
|
python
|
def spin(self, use_thread=False):
'''call callback for all data forever (until \C-c)
:param use_thread: use thread for spin (do not block)
'''
if use_thread:
if self._thread is not None:
raise 'spin called twice'
self._thread = threading.Thread(target=self._spin_internal)
self._thread.setDaemon(True)
self._thread.start()
else:
self._spin_internal()
|
call callback for all data forever (until \C-c)
:param use_thread: use thread for spin (do not block)
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/service.py#L40-L52
|
[
"def _spin_internal(self):\n while True:\n self.spin_once()\n"
] |
class ServiceServer(object):
'''
Example:
>>> def callback(req):
... return 'req = {req}'.format(req=req)
...
>>> service = jps.ServiceServer(callback)
>>> service.spin()
'''
def __init__(self, callback, host=None, res_port=None, use_security=False):
if host is None:
host = env.get_master_host()
context = zmq.Context()
self._socket = context.socket(zmq.REP)
self._auth = None
if use_security:
self._auth = Authenticator.instance(
env.get_server_public_key_dir())
self._auth.set_server_key(
self._socket, env.get_server_secret_key_path())
if res_port is None:
res_port = env.get_res_port()
self._socket.connect(
'tcp://{host}:{port}'.format(host=host, port=res_port))
self._callback = callback
self._thread = None
self._lock = threading.Lock()
def _spin_internal(self):
while True:
self.spin_once()
def spin_once(self):
with self._lock:
request = self._socket.recv()
self._socket.send(cast_bytes(self._callback(request)))
def _stop_if_running(self):
if self._auth is not None:
self._auth.stop()
self._auth = None
if self._thread is not None:
self._thread.join(1.0)
self._thread = None
def close(self):
self._stop_if_running()
with self._lock:
self._socket.close()
def __del__(self):
self._stop_if_running()
|
OTL/jps
|
jps/security.py
|
Authenticator.instance
|
python
|
def instance(cls, public_keys_dir):
'''Please avoid create multi instance'''
if public_keys_dir in cls._authenticators:
return cls._authenticators[public_keys_dir]
new_instance = cls(public_keys_dir)
cls._authenticators[public_keys_dir] = new_instance
return new_instance
|
Please avoid create multi instance
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/security.py#L18-L24
| null |
class Authenticator(object):
_authenticators = {}
@classmethod
def __init__(self, public_keys_dir):
self._auth = ThreadAuthenticator(zmq.Context.instance())
self._auth.start()
self._auth.allow('*')
self._auth.configure_curve(domain='*', location=public_keys_dir)
def set_server_key(self, zmq_socket, server_secret_key_path):
'''must call before bind'''
load_and_set_key(zmq_socket, server_secret_key_path)
zmq_socket.curve_server = True
def set_client_key(self, zmq_socket, client_secret_key_path, server_public_key_path):
'''must call before bind'''
load_and_set_key(zmq_socket, client_secret_key_path)
server_public, _ = zmq.auth.load_certificate(server_public_key_path)
zmq_socket.curve_serverkey = server_public
def stop(self):
self._auth.stop()
|
OTL/jps
|
jps/security.py
|
Authenticator.set_server_key
|
python
|
def set_server_key(self, zmq_socket, server_secret_key_path):
'''must call before bind'''
load_and_set_key(zmq_socket, server_secret_key_path)
zmq_socket.curve_server = True
|
must call before bind
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/security.py#L32-L35
|
[
"def load_and_set_key(zmq_socket, key_path):\n public, secret = zmq.auth.load_certificate(key_path)\n zmq_socket.curve_secretkey = secret\n zmq_socket.curve_publickey = public\n"
] |
class Authenticator(object):
_authenticators = {}
@classmethod
def instance(cls, public_keys_dir):
'''Please avoid create multi instance'''
if public_keys_dir in cls._authenticators:
return cls._authenticators[public_keys_dir]
new_instance = cls(public_keys_dir)
cls._authenticators[public_keys_dir] = new_instance
return new_instance
def __init__(self, public_keys_dir):
self._auth = ThreadAuthenticator(zmq.Context.instance())
self._auth.start()
self._auth.allow('*')
self._auth.configure_curve(domain='*', location=public_keys_dir)
def set_client_key(self, zmq_socket, client_secret_key_path, server_public_key_path):
'''must call before bind'''
load_and_set_key(zmq_socket, client_secret_key_path)
server_public, _ = zmq.auth.load_certificate(server_public_key_path)
zmq_socket.curve_serverkey = server_public
def stop(self):
self._auth.stop()
|
OTL/jps
|
jps/security.py
|
Authenticator.set_client_key
|
python
|
def set_client_key(self, zmq_socket, client_secret_key_path, server_public_key_path):
'''must call before bind'''
load_and_set_key(zmq_socket, client_secret_key_path)
server_public, _ = zmq.auth.load_certificate(server_public_key_path)
zmq_socket.curve_serverkey = server_public
|
must call before bind
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/security.py#L37-L41
|
[
"def load_and_set_key(zmq_socket, key_path):\n public, secret = zmq.auth.load_certificate(key_path)\n zmq_socket.curve_secretkey = secret\n zmq_socket.curve_publickey = public\n"
] |
class Authenticator(object):
_authenticators = {}
@classmethod
def instance(cls, public_keys_dir):
'''Please avoid create multi instance'''
if public_keys_dir in cls._authenticators:
return cls._authenticators[public_keys_dir]
new_instance = cls(public_keys_dir)
cls._authenticators[public_keys_dir] = new_instance
return new_instance
def __init__(self, public_keys_dir):
self._auth = ThreadAuthenticator(zmq.Context.instance())
self._auth.start()
self._auth.allow('*')
self._auth.configure_curve(domain='*', location=public_keys_dir)
def set_server_key(self, zmq_socket, server_secret_key_path):
'''must call before bind'''
load_and_set_key(zmq_socket, server_secret_key_path)
zmq_socket.curve_server = True
def stop(self):
self._auth.stop()
|
OTL/jps
|
jps/subscriber.py
|
Subscriber.spin_once
|
python
|
def spin_once(self, polling_sec=0.010):
'''Read the queued data and call the callback for them.
You have to handle KeyboardInterrupt (\C-c) manually.
Example:
>>> def callback(msg):
... print msg
>>> sub = jps.Subscriber('topic_name', callback)
>>> try:
... while True:
... sub.spin_once():
... time.sleep(0.1)
... except KeyboardInterrupt:
... pass
'''
# parse all data
while True:
socks = dict(self._poller.poll(polling_sec * 1000))
if socks.get(self._socket) == zmq.POLLIN:
msg = self._socket.recv()
self._callback(msg)
else:
return
|
Read the queued data and call the callback for them.
You have to handle KeyboardInterrupt (\C-c) manually.
Example:
>>> def callback(msg):
... print msg
>>> sub = jps.Subscriber('topic_name', callback)
>>> try:
... while True:
... sub.spin_once():
... time.sleep(0.1)
... except KeyboardInterrupt:
... pass
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/subscriber.py#L102-L126
| null |
class Subscriber(object):
'''Subscribe the topic and call the callback function
Example:
>>> def callback(msg):
... print msg
...
>>> sub = jps.Subscriber('topic_name', callback)
>>> sub.spin()
or you can use python generator style
>>> import jps
>>> for msg in jps.Subscriber('/hoge1'):
... print msg
:param topic_name: topic name
:param host: host name of publisher/forwarder
:param sub_port: port of publisher/forwarder
:param deserializer: this function is applied after received (default: None)
'''
def __init__(self, topic_name, callback=None, host=None, sub_port=None,
deserializer='DEFAULT'):
topic_name = get_remapped_topic_name(topic_name)
if topic_name.count(' '):
raise Error('you can\'t use " " for topic_name')
if topic_name == '':
raise Error('empty topic name is not supported')
if host is None:
host = get_master_host()
if sub_port is None:
sub_port = get_sub_port()
if deserializer is 'DEFAULT':
deserializer = get_default_deserializer()
self._deserializer = deserializer
context = zmq.Context()
self._socket = context.socket(zmq.SUB)
self._socket.connect('tcp://{host}:{port}'.format(host=host,
port=sub_port))
self._topic = cast_bytes(topic_name + get_topic_suffix())
self._topic_without_star = self._topic.rstrip('*')
self._socket.setsockopt(zmq.SUBSCRIBE, self._topic_without_star)
self._user_callback = callback
if type(callback) == types.MethodType:
# arg=[self, message, topic_name]
self._user_callback_takes_topic_name = callback.im_func.func_code.co_argcount == 3
elif type(callback) == types.FunctionType:
# arg=[message, topic_name]
self._user_callback_takes_topic_name = callback.func_code.co_argcount == 2
elif hasattr(callback, '__call__'):
# arg=[self, message, topic_name]
self._user_callback_takes_topic_name = callback.__call__.im_func.func_code.co_argcount == 3
else:
self._user_callback_takes_topic_name = False
if type(callback) == types.InstanceType:
print 'argcoutn = ' + callback.func_code.co_argcount
self._thread = None
self._poller = zmq.Poller()
self._poller.register(self._socket, zmq.POLLIN)
def _strip_topic_name_if_not_wildcard(self, raw_msg):
topic, _, msg = raw_msg.partition(' ')
if self._topic != self._topic_without_star:
return (msg, topic)
elif topic == self._topic:
return (msg, topic)
return (None, topic)
def deserialize(self, msg):
if self._deserializer is not None:
return self._deserializer(msg)
return msg
def _callback(self, raw_msg):
if self._user_callback is None:
return
msg, topic_name = self._strip_topic_name_if_not_wildcard(raw_msg)
if msg is not None:
if self._user_callback_takes_topic_name:
self._user_callback(self.deserialize(msg), topic_name)
else:
self._user_callback(self.deserialize(msg))
def spin(self, use_thread=False):
'''call callback for all data forever (until \C-c)
:param use_thread: use thread for spin (do not block)
'''
if use_thread:
if self._thread is not None:
raise Error('spin called twice')
self._thread = threading.Thread(target=self._spin_internal)
self._thread.setDaemon(True)
self._thread.start()
else:
self._spin_internal()
def _spin_internal(self):
for msg in self:
if self._user_callback_takes_topic_name:
self._user_callback(*msg)
else:
self._user_callback(self.deserialize(msg))
def __iter__(self):
return self
def next(self):
'''receive next data (block until next data)'''
try:
raw_msg = self._socket.recv()
except KeyboardInterrupt:
raise StopIteration()
msg, topic_name = self._strip_topic_name_if_not_wildcard(raw_msg)
if msg is None:
return self.next()
if self._user_callback_takes_topic_name:
return (self.deserialize(msg), topic_name)
else:
return self.deserialize(msg)
# for python3
__next__ = next
|
OTL/jps
|
jps/subscriber.py
|
Subscriber.next
|
python
|
def next(self):
'''receive next data (block until next data)'''
try:
raw_msg = self._socket.recv()
except KeyboardInterrupt:
raise StopIteration()
msg, topic_name = self._strip_topic_name_if_not_wildcard(raw_msg)
if msg is None:
return self.next()
if self._user_callback_takes_topic_name:
return (self.deserialize(msg), topic_name)
else:
return self.deserialize(msg)
|
receive next data (block until next data)
|
train
|
https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/subscriber.py#L152-L164
|
[
"def _strip_topic_name_if_not_wildcard(self, raw_msg):\n topic, _, msg = raw_msg.partition(' ')\n if self._topic != self._topic_without_star:\n return (msg, topic)\n elif topic == self._topic:\n return (msg, topic)\n return (None, topic)\n",
"def deserialize(self, msg):\n if self._deserializer is not None:\n return self._deserializer(msg)\n return msg\n",
"def next(self):\n '''receive next data (block until next data)'''\n try:\n raw_msg = self._socket.recv()\n except KeyboardInterrupt:\n raise StopIteration()\n msg, topic_name = self._strip_topic_name_if_not_wildcard(raw_msg)\n if msg is None:\n return self.next()\n if self._user_callback_takes_topic_name:\n return (self.deserialize(msg), topic_name)\n else:\n return self.deserialize(msg)\n"
] |
class Subscriber(object):
'''Subscribe the topic and call the callback function
Example:
>>> def callback(msg):
... print msg
...
>>> sub = jps.Subscriber('topic_name', callback)
>>> sub.spin()
or you can use python generator style
>>> import jps
>>> for msg in jps.Subscriber('/hoge1'):
... print msg
:param topic_name: topic name
:param host: host name of publisher/forwarder
:param sub_port: port of publisher/forwarder
:param deserializer: this function is applied after received (default: None)
'''
def __init__(self, topic_name, callback=None, host=None, sub_port=None,
deserializer='DEFAULT'):
topic_name = get_remapped_topic_name(topic_name)
if topic_name.count(' '):
raise Error('you can\'t use " " for topic_name')
if topic_name == '':
raise Error('empty topic name is not supported')
if host is None:
host = get_master_host()
if sub_port is None:
sub_port = get_sub_port()
if deserializer is 'DEFAULT':
deserializer = get_default_deserializer()
self._deserializer = deserializer
context = zmq.Context()
self._socket = context.socket(zmq.SUB)
self._socket.connect('tcp://{host}:{port}'.format(host=host,
port=sub_port))
self._topic = cast_bytes(topic_name + get_topic_suffix())
self._topic_without_star = self._topic.rstrip('*')
self._socket.setsockopt(zmq.SUBSCRIBE, self._topic_without_star)
self._user_callback = callback
if type(callback) == types.MethodType:
# arg=[self, message, topic_name]
self._user_callback_takes_topic_name = callback.im_func.func_code.co_argcount == 3
elif type(callback) == types.FunctionType:
# arg=[message, topic_name]
self._user_callback_takes_topic_name = callback.func_code.co_argcount == 2
elif hasattr(callback, '__call__'):
# arg=[self, message, topic_name]
self._user_callback_takes_topic_name = callback.__call__.im_func.func_code.co_argcount == 3
else:
self._user_callback_takes_topic_name = False
if type(callback) == types.InstanceType:
print 'argcoutn = ' + callback.func_code.co_argcount
self._thread = None
self._poller = zmq.Poller()
self._poller.register(self._socket, zmq.POLLIN)
def _strip_topic_name_if_not_wildcard(self, raw_msg):
topic, _, msg = raw_msg.partition(' ')
if self._topic != self._topic_without_star:
return (msg, topic)
elif topic == self._topic:
return (msg, topic)
return (None, topic)
def deserialize(self, msg):
if self._deserializer is not None:
return self._deserializer(msg)
return msg
def _callback(self, raw_msg):
if self._user_callback is None:
return
msg, topic_name = self._strip_topic_name_if_not_wildcard(raw_msg)
if msg is not None:
if self._user_callback_takes_topic_name:
self._user_callback(self.deserialize(msg), topic_name)
else:
self._user_callback(self.deserialize(msg))
def spin_once(self, polling_sec=0.010):
'''Read the queued data and call the callback for them.
You have to handle KeyboardInterrupt (\C-c) manually.
Example:
>>> def callback(msg):
... print msg
>>> sub = jps.Subscriber('topic_name', callback)
>>> try:
... while True:
... sub.spin_once():
... time.sleep(0.1)
... except KeyboardInterrupt:
... pass
'''
# parse all data
while True:
socks = dict(self._poller.poll(polling_sec * 1000))
if socks.get(self._socket) == zmq.POLLIN:
msg = self._socket.recv()
self._callback(msg)
else:
return
def spin(self, use_thread=False):
'''call callback for all data forever (until \C-c)
:param use_thread: use thread for spin (do not block)
'''
if use_thread:
if self._thread is not None:
raise Error('spin called twice')
self._thread = threading.Thread(target=self._spin_internal)
self._thread.setDaemon(True)
self._thread.start()
else:
self._spin_internal()
def _spin_internal(self):
for msg in self:
if self._user_callback_takes_topic_name:
self._user_callback(*msg)
else:
self._user_callback(self.deserialize(msg))
def __iter__(self):
return self
# for python3
__next__ = next
|
basilfx/flask-daapserver
|
daapserver/bonjour.py
|
Bonjour.publish
|
python
|
def publish(self, daap_server, preferred_database=None):
if daap_server in self.daap_servers:
self.unpublish(daap_server)
# Zeroconf can advertise the information for one database only. Since
# the protocol supports multiple database, let the user decide which
# database to advertise. If none is specified, take the first one.
provider = daap_server.provider
try:
if preferred_database is not None:
database = provider.server.databases[preferred_database]
else:
database = provider.server.databases.values()[0]
except LookupError:
# The server may not have any databases (yet).
return
# The IP 0.0.0.0 tells this server to bind to all interfaces. However,
# Bonjour advertises itself to others, so others need an actual IP.
# There is definately a better way, but it works.
if daap_server.ip == "0.0.0.0":
addresses = []
for address in zeroconf.get_all_addresses(socket.AF_INET):
if not address == "127.0.0.1":
addresses.append(socket.inet_aton(address))
else:
addresses = [socket.inet_aton(daap_server.ip)]
# Determine machine ID and database ID, depending on the provider. If
# the provider has no support for persistent IDs, generate a random
# ID.
if provider.supports_persistent_id:
machine_id = hex(provider.server.persistent_id)
database_id = hex(database.persistent_id)
else:
machine_id = hex(generate_persistent_id())
database_id = hex(generate_persistent_id())
# iTunes 11+ uses more properties, but this seems to be sufficient.
description = {
"txtvers": "1",
"Password": str(int(bool(daap_server.password))),
"Machine Name": provider.server.name,
"Machine ID": machine_id.upper(),
"Database ID": database_id.upper()
}
# Test is zeroconf supports multiple addresses or not. For
# compatibility with zeroconf 0.17.3 or less.
if not hasattr(zeroconf.ServiceInfo("", ""), "addresses"):
addresses = addresses[0]
self.daap_servers[daap_server] = zeroconf.ServiceInfo(
type="_daap._tcp.local.",
name=provider.server.name + "._daap._tcp.local.",
address=addresses,
port=daap_server.port,
properties=description)
self.zeroconf.register_service(self.daap_servers[daap_server])
|
Publish a given `DAAPServer` instance.
The given instances should be fully configured, including the provider.
By default Zeroconf only advertises the first database, but the DAAP
protocol has support for multiple databases. Therefore, the parameter
`preferred_database` can be set to choose which database ID will be
served.
If the provider is not fully configured (in other words, if the
preferred database cannot be found), this method will not publish this
server. In this case, simply call this method again when the provider
is ready.
If the server was already published, it will be unpublished first.
:param DAAPServer daap_server: DAAP Server instance to publish.
:param int preferred_database: ID of the database to advertise.
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/daapserver/bonjour.py#L21-L101
|
[
"def generate_persistent_id():\n \"\"\"\n Generate a persistent ID. This ID is used in the DAAP protocol to uniquely\n identify objects when they are created.\n\n :return: A 64-bit random integer\n :rtype: int\n \"\"\"\n\n return ctypes.c_long(uuid.uuid1().int >> 64).value\n",
"def unpublish(self, daap_server):\n \"\"\"\n Unpublish a given server.\n\n If the server was not published, this method will not do anything.\n\n :param DAAPServer daap_server: DAAP Server instance to publish.\n \"\"\"\n\n if daap_server not in self.daap_servers:\n return\n\n self.zeroconf.unregister_service(self.daap_servers[daap_server])\n\n del self.daap_servers[daap_server]\n"
] |
class Bonjour(object):
"""
DAAPServer Bonjour/Zeroconf handler.
"""
def __init__(self):
"""
Construct a new Bonjour/Zeroconf server. This server takes `DAAPServer`
instances and advertises them.
"""
self.zeroconf = zeroconf.Zeroconf(zeroconf.InterfaceChoice.All)
self.daap_servers = {}
def unpublish(self, daap_server):
"""
Unpublish a given server.
If the server was not published, this method will not do anything.
:param DAAPServer daap_server: DAAP Server instance to publish.
"""
if daap_server not in self.daap_servers:
return
self.zeroconf.unregister_service(self.daap_servers[daap_server])
del self.daap_servers[daap_server]
def close(self):
"""
Close the Zeroconf instance.
"""
self.zeroconf.close()
|
basilfx/flask-daapserver
|
daapserver/bonjour.py
|
Bonjour.unpublish
|
python
|
def unpublish(self, daap_server):
if daap_server not in self.daap_servers:
return
self.zeroconf.unregister_service(self.daap_servers[daap_server])
del self.daap_servers[daap_server]
|
Unpublish a given server.
If the server was not published, this method will not do anything.
:param DAAPServer daap_server: DAAP Server instance to publish.
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/daapserver/bonjour.py#L103-L117
| null |
class Bonjour(object):
"""
DAAPServer Bonjour/Zeroconf handler.
"""
def __init__(self):
"""
Construct a new Bonjour/Zeroconf server. This server takes `DAAPServer`
instances and advertises them.
"""
self.zeroconf = zeroconf.Zeroconf(zeroconf.InterfaceChoice.All)
self.daap_servers = {}
def publish(self, daap_server, preferred_database=None):
"""
Publish a given `DAAPServer` instance.
The given instances should be fully configured, including the provider.
By default Zeroconf only advertises the first database, but the DAAP
protocol has support for multiple databases. Therefore, the parameter
`preferred_database` can be set to choose which database ID will be
served.
If the provider is not fully configured (in other words, if the
preferred database cannot be found), this method will not publish this
server. In this case, simply call this method again when the provider
is ready.
If the server was already published, it will be unpublished first.
:param DAAPServer daap_server: DAAP Server instance to publish.
:param int preferred_database: ID of the database to advertise.
"""
if daap_server in self.daap_servers:
self.unpublish(daap_server)
# Zeroconf can advertise the information for one database only. Since
# the protocol supports multiple database, let the user decide which
# database to advertise. If none is specified, take the first one.
provider = daap_server.provider
try:
if preferred_database is not None:
database = provider.server.databases[preferred_database]
else:
database = provider.server.databases.values()[0]
except LookupError:
# The server may not have any databases (yet).
return
# The IP 0.0.0.0 tells this server to bind to all interfaces. However,
# Bonjour advertises itself to others, so others need an actual IP.
# There is definately a better way, but it works.
if daap_server.ip == "0.0.0.0":
addresses = []
for address in zeroconf.get_all_addresses(socket.AF_INET):
if not address == "127.0.0.1":
addresses.append(socket.inet_aton(address))
else:
addresses = [socket.inet_aton(daap_server.ip)]
# Determine machine ID and database ID, depending on the provider. If
# the provider has no support for persistent IDs, generate a random
# ID.
if provider.supports_persistent_id:
machine_id = hex(provider.server.persistent_id)
database_id = hex(database.persistent_id)
else:
machine_id = hex(generate_persistent_id())
database_id = hex(generate_persistent_id())
# iTunes 11+ uses more properties, but this seems to be sufficient.
description = {
"txtvers": "1",
"Password": str(int(bool(daap_server.password))),
"Machine Name": provider.server.name,
"Machine ID": machine_id.upper(),
"Database ID": database_id.upper()
}
# Test is zeroconf supports multiple addresses or not. For
# compatibility with zeroconf 0.17.3 or less.
if not hasattr(zeroconf.ServiceInfo("", ""), "addresses"):
addresses = addresses[0]
self.daap_servers[daap_server] = zeroconf.ServiceInfo(
type="_daap._tcp.local.",
name=provider.server.name + "._daap._tcp.local.",
address=addresses,
port=daap_server.port,
properties=description)
self.zeroconf.register_service(self.daap_servers[daap_server])
def close(self):
"""
Close the Zeroconf instance.
"""
self.zeroconf.close()
|
basilfx/flask-daapserver
|
daapserver/__init__.py
|
DaapServer.serve_forever
|
python
|
def serve_forever(self):
# Verify that the provider has a server.
if self.provider.server is None:
raise ValueError(
"Cannot start server because the provider has no server to "
"publish.")
# Verify that the provider has a database to advertise.
if not self.provider.server.databases:
raise ValueError(
"Cannot start server because the provider has no databases to "
"publish.")
# Create WSGI server and run it.
self.server = WSGIServer((self.ip, self.port), application=self.app)
# Register Bonjour.
if self.bonjour:
self.bonjour.publish(self)
# Start server until finished
try:
self.server.serve_forever()
except KeyboardInterrupt:
pass
finally:
# Unregister Bonjour
if self.bonjour:
self.bonjour.unpublish(self)
|
Run the DAAP server. Start by advertising the server via Bonjour. Then
serve requests until CTRL + C is received.
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/daapserver/__init__.py#L37-L70
| null |
class DaapServer(object):
"""
DAAP Server instance. Combine all components from this module in a ready
to use class. This class uses a gevent-based event loop.
"""
def __init__(self, provider, password=None, ip="0.0.0.0", port=3689,
cache=True, cache_timeout=3600, bonjour=True, debug=False):
"""
Construct a new DAAP Server.
"""
self.provider = provider
self.password = password
self.ip = ip
self.port = port
self.cache = cache
self.cache_timeout = cache_timeout
self.bonjour = Bonjour() if bonjour else None
self.debug = debug
# Create DAAP server app
self.app = create_server_app(
self.provider, self.password, self.cache, self.cache_timeout,
self.debug)
def stop(self):
"""
Stop the server.
"""
self.server.stop()
|
basilfx/flask-daapserver
|
daapserver/utils.py
|
diff
|
python
|
def diff(new, old):
if old is not None:
is_update = True
removed = set(new.removed(old))
updated = set(new.updated(old))
else:
is_update = False
updated = new
removed = set()
return updated, removed, is_update
|
Compute the difference in items of two revisioned collections. If only
`new' is specified, it is assumed it is not an update. If both are set,
the removed items are returned first. Otherwise, the updated and edited
ones are returned.
:param set new: Set of new objects
:param set old: Set of old objects
:return: A tuple consisting of `(added, removed, is_update)`.
:rtype: tuple
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/daapserver/utils.py#L6-L30
| null |
import sys
import uuid
import ctypes
def generate_persistent_id():
"""
Generate a persistent ID. This ID is used in the DAAP protocol to uniquely
identify objects when they are created.
:return: A 64-bit random integer
:rtype: int
"""
return ctypes.c_long(uuid.uuid1().int >> 64).value
def parse_byte_range(byte_range, min_byte=0, max_byte=sys.maxint):
"""
Parse and validate a byte range. A byte range is a tuple of (begin, end)
indices. `begin' should be smaller than `end', and both should fall within
the `min_byte' and `max_byte'.
In case of a violation, a `ValueError` is raised.
"""
if not byte_range:
return min_byte, max_byte
begin = byte_range[0] or min_byte
end = byte_range[1] or max_byte
if end < begin:
raise ValueError("End before begin")
if begin < min_byte:
raise ValueError("Begin smaller than min")
if end > max_byte:
raise ValueError("End larger than max")
return begin, end
def to_tree(instance, *children):
"""
Generate tree structure of an instance, and its children. This method
yields its results, instead of returning them.
"""
# Yield representation of self
yield unicode(instance)
# Iterate trough each instance child collection
for i, child in enumerate(children):
lines = 0
yield "|"
yield "+---" + unicode(child)
if i != len(children) - 1:
a = "|"
else:
a = " "
# Iterate trough all values of collection of child
for j, item in enumerate(child.itervalues()):
if j != len(child) - 1:
b = "|"
else:
b = " "
if j == 0:
yield a + " |"
# Append prefix to each line
for k, line in enumerate(item.to_tree()):
lines += 1
if k == 0:
yield a + " +---" + line
else:
yield a + " " + b + " " + line
# Add extra space if required
if len(children) > 1 and i == len(children) - 1 and lines > 1:
yield a
def invoke_hooks(hooks, name, *args, **kwargs):
"""
Invoke one or more hooks that have been registered under `name'. Additional
arguments and keyword arguments can be provided.
There is no exception catching, so if a hook fails, it will disrupt the
chain and/or rest of program.
"""
callbacks = hooks.get(name, [])
for callback in callbacks:
callback(*args, **kwargs)
|
basilfx/flask-daapserver
|
daapserver/utils.py
|
parse_byte_range
|
python
|
def parse_byte_range(byte_range, min_byte=0, max_byte=sys.maxint):
if not byte_range:
return min_byte, max_byte
begin = byte_range[0] or min_byte
end = byte_range[1] or max_byte
if end < begin:
raise ValueError("End before begin")
if begin < min_byte:
raise ValueError("Begin smaller than min")
if end > max_byte:
raise ValueError("End larger than max")
return begin, end
|
Parse and validate a byte range. A byte range is a tuple of (begin, end)
indices. `begin' should be smaller than `end', and both should fall within
the `min_byte' and `max_byte'.
In case of a violation, a `ValueError` is raised.
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/daapserver/utils.py#L45-L69
| null |
import sys
import uuid
import ctypes
def diff(new, old):
"""
Compute the difference in items of two revisioned collections. If only
`new' is specified, it is assumed it is not an update. If both are set,
the removed items are returned first. Otherwise, the updated and edited
ones are returned.
:param set new: Set of new objects
:param set old: Set of old objects
:return: A tuple consisting of `(added, removed, is_update)`.
:rtype: tuple
"""
if old is not None:
is_update = True
removed = set(new.removed(old))
updated = set(new.updated(old))
else:
is_update = False
updated = new
removed = set()
return updated, removed, is_update
def generate_persistent_id():
"""
Generate a persistent ID. This ID is used in the DAAP protocol to uniquely
identify objects when they are created.
:return: A 64-bit random integer
:rtype: int
"""
return ctypes.c_long(uuid.uuid1().int >> 64).value
def to_tree(instance, *children):
"""
Generate tree structure of an instance, and its children. This method
yields its results, instead of returning them.
"""
# Yield representation of self
yield unicode(instance)
# Iterate trough each instance child collection
for i, child in enumerate(children):
lines = 0
yield "|"
yield "+---" + unicode(child)
if i != len(children) - 1:
a = "|"
else:
a = " "
# Iterate trough all values of collection of child
for j, item in enumerate(child.itervalues()):
if j != len(child) - 1:
b = "|"
else:
b = " "
if j == 0:
yield a + " |"
# Append prefix to each line
for k, line in enumerate(item.to_tree()):
lines += 1
if k == 0:
yield a + " +---" + line
else:
yield a + " " + b + " " + line
# Add extra space if required
if len(children) > 1 and i == len(children) - 1 and lines > 1:
yield a
def invoke_hooks(hooks, name, *args, **kwargs):
"""
Invoke one or more hooks that have been registered under `name'. Additional
arguments and keyword arguments can be provided.
There is no exception catching, so if a hook fails, it will disrupt the
chain and/or rest of program.
"""
callbacks = hooks.get(name, [])
for callback in callbacks:
callback(*args, **kwargs)
|
basilfx/flask-daapserver
|
daapserver/utils.py
|
to_tree
|
python
|
def to_tree(instance, *children):
# Yield representation of self
yield unicode(instance)
# Iterate trough each instance child collection
for i, child in enumerate(children):
lines = 0
yield "|"
yield "+---" + unicode(child)
if i != len(children) - 1:
a = "|"
else:
a = " "
# Iterate trough all values of collection of child
for j, item in enumerate(child.itervalues()):
if j != len(child) - 1:
b = "|"
else:
b = " "
if j == 0:
yield a + " |"
# Append prefix to each line
for k, line in enumerate(item.to_tree()):
lines += 1
if k == 0:
yield a + " +---" + line
else:
yield a + " " + b + " " + line
# Add extra space if required
if len(children) > 1 and i == len(children) - 1 and lines > 1:
yield a
|
Generate tree structure of an instance, and its children. This method
yields its results, instead of returning them.
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/daapserver/utils.py#L72-L114
| null |
import sys
import uuid
import ctypes
def diff(new, old):
"""
Compute the difference in items of two revisioned collections. If only
`new' is specified, it is assumed it is not an update. If both are set,
the removed items are returned first. Otherwise, the updated and edited
ones are returned.
:param set new: Set of new objects
:param set old: Set of old objects
:return: A tuple consisting of `(added, removed, is_update)`.
:rtype: tuple
"""
if old is not None:
is_update = True
removed = set(new.removed(old))
updated = set(new.updated(old))
else:
is_update = False
updated = new
removed = set()
return updated, removed, is_update
def generate_persistent_id():
"""
Generate a persistent ID. This ID is used in the DAAP protocol to uniquely
identify objects when they are created.
:return: A 64-bit random integer
:rtype: int
"""
return ctypes.c_long(uuid.uuid1().int >> 64).value
def parse_byte_range(byte_range, min_byte=0, max_byte=sys.maxint):
"""
Parse and validate a byte range. A byte range is a tuple of (begin, end)
indices. `begin' should be smaller than `end', and both should fall within
the `min_byte' and `max_byte'.
In case of a violation, a `ValueError` is raised.
"""
if not byte_range:
return min_byte, max_byte
begin = byte_range[0] or min_byte
end = byte_range[1] or max_byte
if end < begin:
raise ValueError("End before begin")
if begin < min_byte:
raise ValueError("Begin smaller than min")
if end > max_byte:
raise ValueError("End larger than max")
return begin, end
def invoke_hooks(hooks, name, *args, **kwargs):
"""
Invoke one or more hooks that have been registered under `name'. Additional
arguments and keyword arguments can be provided.
There is no exception catching, so if a hook fails, it will disrupt the
chain and/or rest of program.
"""
callbacks = hooks.get(name, [])
for callback in callbacks:
callback(*args, **kwargs)
|
basilfx/flask-daapserver
|
daapserver/utils.py
|
invoke_hooks
|
python
|
def invoke_hooks(hooks, name, *args, **kwargs):
callbacks = hooks.get(name, [])
for callback in callbacks:
callback(*args, **kwargs)
|
Invoke one or more hooks that have been registered under `name'. Additional
arguments and keyword arguments can be provided.
There is no exception catching, so if a hook fails, it will disrupt the
chain and/or rest of program.
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/daapserver/utils.py#L117-L129
| null |
import sys
import uuid
import ctypes
def diff(new, old):
"""
Compute the difference in items of two revisioned collections. If only
`new' is specified, it is assumed it is not an update. If both are set,
the removed items are returned first. Otherwise, the updated and edited
ones are returned.
:param set new: Set of new objects
:param set old: Set of old objects
:return: A tuple consisting of `(added, removed, is_update)`.
:rtype: tuple
"""
if old is not None:
is_update = True
removed = set(new.removed(old))
updated = set(new.updated(old))
else:
is_update = False
updated = new
removed = set()
return updated, removed, is_update
def generate_persistent_id():
"""
Generate a persistent ID. This ID is used in the DAAP protocol to uniquely
identify objects when they are created.
:return: A 64-bit random integer
:rtype: int
"""
return ctypes.c_long(uuid.uuid1().int >> 64).value
def parse_byte_range(byte_range, min_byte=0, max_byte=sys.maxint):
"""
Parse and validate a byte range. A byte range is a tuple of (begin, end)
indices. `begin' should be smaller than `end', and both should fall within
the `min_byte' and `max_byte'.
In case of a violation, a `ValueError` is raised.
"""
if not byte_range:
return min_byte, max_byte
begin = byte_range[0] or min_byte
end = byte_range[1] or max_byte
if end < begin:
raise ValueError("End before begin")
if begin < min_byte:
raise ValueError("Begin smaller than min")
if end > max_byte:
raise ValueError("End larger than max")
return begin, end
def to_tree(instance, *children):
"""
Generate tree structure of an instance, and its children. This method
yields its results, instead of returning them.
"""
# Yield representation of self
yield unicode(instance)
# Iterate trough each instance child collection
for i, child in enumerate(children):
lines = 0
yield "|"
yield "+---" + unicode(child)
if i != len(children) - 1:
a = "|"
else:
a = " "
# Iterate trough all values of collection of child
for j, item in enumerate(child.itervalues()):
if j != len(child) - 1:
b = "|"
else:
b = " "
if j == 0:
yield a + " |"
# Append prefix to each line
for k, line in enumerate(item.to_tree()):
lines += 1
if k == 0:
yield a + " +---" + line
else:
yield a + " " + b + " " + line
# Add extra space if required
if len(children) > 1 and i == len(children) - 1 and lines > 1:
yield a
|
basilfx/flask-daapserver
|
daapserver/provider.py
|
Provider.create_session
|
python
|
def create_session(self, user_agent, remote_address, client_version):
self.session_counter += 1
self.sessions[self.session_counter] = session = self.session_class()
# Set session properties
session.user_agent = user_agent
session.remote_address = remote_address
session.client_version = client_version
# Invoke hooks
invoke_hooks(self.hooks, "session_created", self.session_counter)
return self.session_counter
|
Create a new session.
:param str user_agent: Client user agent
:param str remote_addr: Remote address of client
:param str client_version: Remote client version
:return: The new session id
:rtype: int
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/daapserver/provider.py#L96-L118
|
[
"def invoke_hooks(hooks, name, *args, **kwargs):\n \"\"\"\n Invoke one or more hooks that have been registered under `name'. Additional\n arguments and keyword arguments can be provided.\n\n There is no exception catching, so if a hook fails, it will disrupt the\n chain and/or rest of program.\n \"\"\"\n\n callbacks = hooks.get(name, [])\n\n for callback in callbacks:\n callback(*args, **kwargs)\n"
] |
class Provider(object):
"""
Base provider implementation. A provider is responsible for serving the
data to the client. This class should be subclassed.
"""
# Class type to use for sessions
session_class = Session
# Whether to artwork is supported
supports_artwork = False
# Whether persistent IDs are supported
supports_persistent_id = False
def __init__(self):
"""
Create a new Provider. This method should be invoked from the subclass.
"""
self.revision = 1
self.server = None
self.sessions = {}
self.session_counter = 0
self.hooks = {
"session_created": [],
"session_destroyed": [],
"updated": []
}
self.lock = gevent.lock.Semaphore()
self.next_revision_available = gevent.event.Event()
def destroy_session(self, session_id):
"""
Destroy an (existing) session.
"""
try:
del self.sessions[session_id]
except KeyError:
pass
# Invoke hooks
invoke_hooks(self.hooks, "session_destroyed", session_id)
def get_next_revision(self, session_id, revision, delta):
"""
Determine the next revision number for a given session id, revision
and delta.
In case the client is up-to-date, this method will block until the next
revision is available.
:param int session_id: Session identifier
:param int revision: Client revision number
:param int delta: Client revision delta (old client version number)
:return: Next revision number
:rtype: int
"""
session = self.sessions[session_id]
session.state = State.connected
if delta == revision:
# Increment revision. Never decrement.
session.revision = max(session.revision, revision)
# Wait for next revision to become ready.
self.next_revision_available.wait()
return self.revision
def update(self):
"""
Update this provider. Should be invoked when the server gets updated.
This method will notify all clients that wait for
`self.next_revision_available`.
"""
with self.lock:
# Increment revision and commit it.
self.revision += 1
self.server.commit(self.revision + 1)
# Unblock all waiting clients.
self.next_revision_available.set()
self.next_revision_available.clear()
# Check sessions to see which revision can be removed.
if self.sessions:
lowest_revision = min(
session.revision for session in self.sessions.itervalues())
# Remove all old revision history
if lowest_revision == self.revision:
self.server.clean(lowest_revision)
# Invoke hooks
invoke_hooks(self.hooks, "updated", self.revision)
def get_databases(self, session_id, revision, delta):
"""
"""
if delta == 0:
new = self.server.databases
old = None
else:
new = self.server.databases(revision)
old = self.server.databases(delta)
return new, old
def get_containers(self, session_id, database_id, revision, delta):
"""
"""
if delta == 0:
new = self.server \
.databases[database_id] \
.containers
old = None
else:
new = self.server \
.databases[database_id] \
.containers(revision)
old = self.server \
.databases[database_id] \
.containers(delta)
return new, old
def get_container_items(self, session_id, database_id, container_id,
revision, delta):
"""
"""
if delta == 0:
new = self.server \
.databases[database_id] \
.containers[container_id] \
.container_items
old = None
else:
new = self.server \
.databases[database_id] \
.containers[container_id] \
.container_items(revision)
old = self.server \
.databases[database_id] \
.containers[container_id] \
.container_items(delta)
return new, old
def get_items(self, session_id, database_id, revision, delta):
"""
"""
if delta == 0:
new = self.server \
.databases[database_id] \
.items
old = None
else:
new = self.server \
.databases[database_id] \
.items(revision)
old = self.server \
.databases[database_id] \
.items(delta)
return new, old
def get_item(self, session_id, database_id, item_id, byte_range=None):
"""
"""
def _inner(data):
# Change state to streaming
session.state = State.streaming
try:
# Yield data
if isinstance(data, basestring):
yield data
else:
for chunk in data:
yield chunk
finally:
# Change state back to connected, even if an exception is
# raised.
session.state = State.connected
session = self.sessions[session_id]
item = self.server.databases[database_id].items[item_id]
# Increment counter for statistics. Make a distinction between requests
# with a byte range (play-pause) and ones without.
session.increment_counter("items")
if byte_range is None:
session.increment_counter("items_unique")
data, mimetype, size = self.get_item_data(session, item, byte_range)
return _inner(data), mimetype, size
def get_artwork(self, session_id, database_id, item_id):
"""
"""
session = self.sessions[session_id]
item = self.server.databases[database_id].items[item_id]
# Increment counter for statistics
session.increment_counter("artworks")
return self.get_artwork_data(session, item)
def get_item_data(self, session, item, byte_range=None):
"""
Fetch the requested item. The result can be an iterator, file
descriptor, or just raw bytes. Optionally, a begin and/or end range can
be specified.
The result should be an tuple, of the form (data, mimetype, size). The
data can be an iterator, file descriptor or raw bytes. In case a range
is requested, add a fourth tuple item, length. The length should be the
size of the requested data that is being returned.
Note: this method requires `Provider.supports_artwork = True`
:param Session session: Client session
:param Item item: Requested item.
:param tuple byte_range: Optional byte range to return a part of the
file.
:return: File descriptor, iterator or raw bytes.
"""
raise NotImplementedError("Needs to be overridden.")
def get_artwork_data(self, session, item):
"""
Fetch artwork for the requested item.
The result should be an tuple, of the form (data, mimetype, size). The
data can be an iterator, file descriptor or raw bytes.
Note: this method requires `Provider.supports_artwork = True`
:param Session session: Client session
:param Item item: Requested item.
:return: File descriptor, iterator or raw bytes.
"""
raise NotImplementedError("Needs to be overridden.")
|
basilfx/flask-daapserver
|
daapserver/provider.py
|
Provider.destroy_session
|
python
|
def destroy_session(self, session_id):
try:
del self.sessions[session_id]
except KeyError:
pass
# Invoke hooks
invoke_hooks(self.hooks, "session_destroyed", session_id)
|
Destroy an (existing) session.
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/daapserver/provider.py#L120-L131
|
[
"def invoke_hooks(hooks, name, *args, **kwargs):\n \"\"\"\n Invoke one or more hooks that have been registered under `name'. Additional\n arguments and keyword arguments can be provided.\n\n There is no exception catching, so if a hook fails, it will disrupt the\n chain and/or rest of program.\n \"\"\"\n\n callbacks = hooks.get(name, [])\n\n for callback in callbacks:\n callback(*args, **kwargs)\n"
] |
class Provider(object):
"""
Base provider implementation. A provider is responsible for serving the
data to the client. This class should be subclassed.
"""
# Class type to use for sessions
session_class = Session
# Whether to artwork is supported
supports_artwork = False
# Whether persistent IDs are supported
supports_persistent_id = False
def __init__(self):
"""
Create a new Provider. This method should be invoked from the subclass.
"""
self.revision = 1
self.server = None
self.sessions = {}
self.session_counter = 0
self.hooks = {
"session_created": [],
"session_destroyed": [],
"updated": []
}
self.lock = gevent.lock.Semaphore()
self.next_revision_available = gevent.event.Event()
def create_session(self, user_agent, remote_address, client_version):
"""
Create a new session.
:param str user_agent: Client user agent
:param str remote_addr: Remote address of client
:param str client_version: Remote client version
:return: The new session id
:rtype: int
"""
self.session_counter += 1
self.sessions[self.session_counter] = session = self.session_class()
# Set session properties
session.user_agent = user_agent
session.remote_address = remote_address
session.client_version = client_version
# Invoke hooks
invoke_hooks(self.hooks, "session_created", self.session_counter)
return self.session_counter
def get_next_revision(self, session_id, revision, delta):
"""
Determine the next revision number for a given session id, revision
and delta.
In case the client is up-to-date, this method will block until the next
revision is available.
:param int session_id: Session identifier
:param int revision: Client revision number
:param int delta: Client revision delta (old client version number)
:return: Next revision number
:rtype: int
"""
session = self.sessions[session_id]
session.state = State.connected
if delta == revision:
# Increment revision. Never decrement.
session.revision = max(session.revision, revision)
# Wait for next revision to become ready.
self.next_revision_available.wait()
return self.revision
def update(self):
"""
Update this provider. Should be invoked when the server gets updated.
This method will notify all clients that wait for
`self.next_revision_available`.
"""
with self.lock:
# Increment revision and commit it.
self.revision += 1
self.server.commit(self.revision + 1)
# Unblock all waiting clients.
self.next_revision_available.set()
self.next_revision_available.clear()
# Check sessions to see which revision can be removed.
if self.sessions:
lowest_revision = min(
session.revision for session in self.sessions.itervalues())
# Remove all old revision history
if lowest_revision == self.revision:
self.server.clean(lowest_revision)
# Invoke hooks
invoke_hooks(self.hooks, "updated", self.revision)
def get_databases(self, session_id, revision, delta):
"""
"""
if delta == 0:
new = self.server.databases
old = None
else:
new = self.server.databases(revision)
old = self.server.databases(delta)
return new, old
def get_containers(self, session_id, database_id, revision, delta):
"""
"""
if delta == 0:
new = self.server \
.databases[database_id] \
.containers
old = None
else:
new = self.server \
.databases[database_id] \
.containers(revision)
old = self.server \
.databases[database_id] \
.containers(delta)
return new, old
def get_container_items(self, session_id, database_id, container_id,
revision, delta):
"""
"""
if delta == 0:
new = self.server \
.databases[database_id] \
.containers[container_id] \
.container_items
old = None
else:
new = self.server \
.databases[database_id] \
.containers[container_id] \
.container_items(revision)
old = self.server \
.databases[database_id] \
.containers[container_id] \
.container_items(delta)
return new, old
def get_items(self, session_id, database_id, revision, delta):
"""
"""
if delta == 0:
new = self.server \
.databases[database_id] \
.items
old = None
else:
new = self.server \
.databases[database_id] \
.items(revision)
old = self.server \
.databases[database_id] \
.items(delta)
return new, old
def get_item(self, session_id, database_id, item_id, byte_range=None):
"""
"""
def _inner(data):
# Change state to streaming
session.state = State.streaming
try:
# Yield data
if isinstance(data, basestring):
yield data
else:
for chunk in data:
yield chunk
finally:
# Change state back to connected, even if an exception is
# raised.
session.state = State.connected
session = self.sessions[session_id]
item = self.server.databases[database_id].items[item_id]
# Increment counter for statistics. Make a distinction between requests
# with a byte range (play-pause) and ones without.
session.increment_counter("items")
if byte_range is None:
session.increment_counter("items_unique")
data, mimetype, size = self.get_item_data(session, item, byte_range)
return _inner(data), mimetype, size
def get_artwork(self, session_id, database_id, item_id):
"""
"""
session = self.sessions[session_id]
item = self.server.databases[database_id].items[item_id]
# Increment counter for statistics
session.increment_counter("artworks")
return self.get_artwork_data(session, item)
def get_item_data(self, session, item, byte_range=None):
"""
Fetch the requested item. The result can be an iterator, file
descriptor, or just raw bytes. Optionally, a begin and/or end range can
be specified.
The result should be an tuple, of the form (data, mimetype, size). The
data can be an iterator, file descriptor or raw bytes. In case a range
is requested, add a fourth tuple item, length. The length should be the
size of the requested data that is being returned.
Note: this method requires `Provider.supports_artwork = True`
:param Session session: Client session
:param Item item: Requested item.
:param tuple byte_range: Optional byte range to return a part of the
file.
:return: File descriptor, iterator or raw bytes.
"""
raise NotImplementedError("Needs to be overridden.")
def get_artwork_data(self, session, item):
"""
Fetch artwork for the requested item.
The result should be an tuple, of the form (data, mimetype, size). The
data can be an iterator, file descriptor or raw bytes.
Note: this method requires `Provider.supports_artwork = True`
:param Session session: Client session
:param Item item: Requested item.
:return: File descriptor, iterator or raw bytes.
"""
raise NotImplementedError("Needs to be overridden.")
|
basilfx/flask-daapserver
|
daapserver/provider.py
|
Provider.get_next_revision
|
python
|
def get_next_revision(self, session_id, revision, delta):
session = self.sessions[session_id]
session.state = State.connected
if delta == revision:
# Increment revision. Never decrement.
session.revision = max(session.revision, revision)
# Wait for next revision to become ready.
self.next_revision_available.wait()
return self.revision
|
Determine the next revision number for a given session id, revision
and delta.
In case the client is up-to-date, this method will block until the next
revision is available.
:param int session_id: Session identifier
:param int revision: Client revision number
:param int delta: Client revision delta (old client version number)
:return: Next revision number
:rtype: int
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/daapserver/provider.py#L133-L158
| null |
class Provider(object):
"""
Base provider implementation. A provider is responsible for serving the
data to the client. This class should be subclassed.
"""
# Class type to use for sessions
session_class = Session
# Whether to artwork is supported
supports_artwork = False
# Whether persistent IDs are supported
supports_persistent_id = False
def __init__(self):
"""
Create a new Provider. This method should be invoked from the subclass.
"""
self.revision = 1
self.server = None
self.sessions = {}
self.session_counter = 0
self.hooks = {
"session_created": [],
"session_destroyed": [],
"updated": []
}
self.lock = gevent.lock.Semaphore()
self.next_revision_available = gevent.event.Event()
def create_session(self, user_agent, remote_address, client_version):
"""
Create a new session.
:param str user_agent: Client user agent
:param str remote_addr: Remote address of client
:param str client_version: Remote client version
:return: The new session id
:rtype: int
"""
self.session_counter += 1
self.sessions[self.session_counter] = session = self.session_class()
# Set session properties
session.user_agent = user_agent
session.remote_address = remote_address
session.client_version = client_version
# Invoke hooks
invoke_hooks(self.hooks, "session_created", self.session_counter)
return self.session_counter
def destroy_session(self, session_id):
"""
Destroy an (existing) session.
"""
try:
del self.sessions[session_id]
except KeyError:
pass
# Invoke hooks
invoke_hooks(self.hooks, "session_destroyed", session_id)
def update(self):
"""
Update this provider. Should be invoked when the server gets updated.
This method will notify all clients that wait for
`self.next_revision_available`.
"""
with self.lock:
# Increment revision and commit it.
self.revision += 1
self.server.commit(self.revision + 1)
# Unblock all waiting clients.
self.next_revision_available.set()
self.next_revision_available.clear()
# Check sessions to see which revision can be removed.
if self.sessions:
lowest_revision = min(
session.revision for session in self.sessions.itervalues())
# Remove all old revision history
if lowest_revision == self.revision:
self.server.clean(lowest_revision)
# Invoke hooks
invoke_hooks(self.hooks, "updated", self.revision)
def get_databases(self, session_id, revision, delta):
"""
"""
if delta == 0:
new = self.server.databases
old = None
else:
new = self.server.databases(revision)
old = self.server.databases(delta)
return new, old
def get_containers(self, session_id, database_id, revision, delta):
"""
"""
if delta == 0:
new = self.server \
.databases[database_id] \
.containers
old = None
else:
new = self.server \
.databases[database_id] \
.containers(revision)
old = self.server \
.databases[database_id] \
.containers(delta)
return new, old
def get_container_items(self, session_id, database_id, container_id,
revision, delta):
"""
"""
if delta == 0:
new = self.server \
.databases[database_id] \
.containers[container_id] \
.container_items
old = None
else:
new = self.server \
.databases[database_id] \
.containers[container_id] \
.container_items(revision)
old = self.server \
.databases[database_id] \
.containers[container_id] \
.container_items(delta)
return new, old
def get_items(self, session_id, database_id, revision, delta):
"""
"""
if delta == 0:
new = self.server \
.databases[database_id] \
.items
old = None
else:
new = self.server \
.databases[database_id] \
.items(revision)
old = self.server \
.databases[database_id] \
.items(delta)
return new, old
def get_item(self, session_id, database_id, item_id, byte_range=None):
    """
    Stream one item's data to a client.

    Looks up the session and item, updates the per-session statistics and
    delegates to `get_item_data`. The data is wrapped in a generator that
    keeps `session.state` set to `streaming` while data is being yielded.

    :param int session_id: Session identifier.
    :param database_id: Database identifier.
    :param item_id: Item identifier.
    :param tuple byte_range: Optional (begin, end) byte range.
    :return: Tuple of (data generator, mimetype, size).
    """
    def _inner(data):
        # Mark the session as streaming for the duration of the transfer.
        session.state = State.streaming
        try:
            # Yield data either as a single blob or chunk-by-chunk.
            # (`basestring` is Python 2 only.)
            if isinstance(data, basestring):
                yield data
            else:
                for chunk in data:
                    yield chunk
        finally:
            # Change state back to connected, even if an exception is
            # raised.
            session.state = State.connected
    session = self.sessions[session_id]
    item = self.server.databases[database_id].items[item_id]
    # Increment counter for statistics. Make a distinction between requests
    # with a byte range (play-pause) and ones without.
    session.increment_counter("items")
    if byte_range is None:
        session.increment_counter("items_unique")
    data, mimetype, size = self.get_item_data(session, item, byte_range)
    return _inner(data), mimetype, size
def get_artwork(self, session_id, database_id, item_id):
    """
    Return artwork for one item via `get_artwork_data`.

    Also bumps the per-session "artworks" statistics counter.
    """
    session, item = (
        self.sessions[session_id],
        self.server.databases[database_id].items[item_id],
    )
    # Track the request for statistics before fetching the data.
    session.increment_counter("artworks")
    return self.get_artwork_data(session, item)
def get_item_data(self, session, item, byte_range=None):
    """
    Abstract hook: fetch the data for `item`.

    Subclasses must return a tuple (data, mimetype, size), where `data`
    may be raw bytes, a file descriptor or an iterator. When `byte_range`
    is given, only that part of the file should be returned, and a fourth
    tuple element holding the returned length may be added.

    :param Session session: Client session
    :param Item item: Requested item.
    :param tuple byte_range: Optional byte range to return a part of the
                             file.
    :return: File descriptor, iterator or raw bytes.
    """
    raise NotImplementedError("Needs to be overridden.")
def get_artwork_data(self, session, item):
    """
    Abstract hook: fetch artwork for the requested item.

    Subclasses must return a tuple (data, mimetype, size), where `data`
    may be raw bytes, a file descriptor or an iterator.

    Note: this method requires `Provider.supports_artwork = True`

    :param Session session: Client session
    :param Item item: Requested item.
    :return: File descriptor, iterator or raw bytes.
    """
    raise NotImplementedError("Needs to be overridden.")
|
basilfx/flask-daapserver
|
daapserver/provider.py
|
Provider.update
|
python
|
def update(self):
with self.lock:
# Increment revision and commit it.
self.revision += 1
self.server.commit(self.revision + 1)
# Unblock all waiting clients.
self.next_revision_available.set()
self.next_revision_available.clear()
# Check sessions to see which revision can be removed.
if self.sessions:
lowest_revision = min(
session.revision for session in self.sessions.itervalues())
# Remove all old revision history
if lowest_revision == self.revision:
self.server.clean(lowest_revision)
# Invoke hooks
invoke_hooks(self.hooks, "updated", self.revision)
|
Update this provider. Should be invoked when the server gets updated.
This method will notify all clients that wait for
`self.next_revision_available`.
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/daapserver/provider.py#L160-L187
|
[
"def invoke_hooks(hooks, name, *args, **kwargs):\n \"\"\"\n Invoke one or more hooks that have been registered under `name'. Additional\n arguments and keyword arguments can be provided.\n\n There is no exception catching, so if a hook fails, it will disrupt the\n chain and/or rest of program.\n \"\"\"\n\n callbacks = hooks.get(name, [])\n\n for callback in callbacks:\n callback(*args, **kwargs)\n"
] |
class Provider(object):
    """
    Base provider implementation. A provider is responsible for serving the
    data to the client. This class should be subclassed.
    """
    # Class type to use for sessions
    session_class = Session
    # Whether artwork is supported by this provider
    supports_artwork = False
    # Whether persistent IDs are supported
    supports_persistent_id = False
    def __init__(self):
        """
        Create a new Provider. This method should be invoked from the subclass.
        """
        # Current (published) revision number.
        self.revision = 1
        # Backing server; presumably assigned by the owner after
        # construction — TODO confirm against callers.
        self.server = None
        # Mapping of session id -> session instance.
        self.sessions = {}
        self.session_counter = 0
        # Registered callbacks per event name; see `invoke_hooks`.
        self.hooks = {
            "session_created": [],
            "session_destroyed": [],
            "updated": []
        }
        # Cooperative (gevent) lock serializing updates.
        self.lock = gevent.lock.Semaphore()
        # Signalled (set then cleared) whenever a new revision is published.
        self.next_revision_available = gevent.event.Event()
    def create_session(self, user_agent, remote_address, client_version):
        """
        Create a new session.
        :param str user_agent: Client user agent
        :param str remote_address: Remote address of client
        :param str client_version: Remote client version
        :return: The new session id
        :rtype: int
        """
        self.session_counter += 1
        self.sessions[self.session_counter] = session = self.session_class()
        # Set session properties
        session.user_agent = user_agent
        session.remote_address = remote_address
        session.client_version = client_version
        # Invoke hooks
        invoke_hooks(self.hooks, "session_created", self.session_counter)
        return self.session_counter
    def destroy_session(self, session_id):
        """
        Destroy an (existing) session.
        """
        try:
            del self.sessions[session_id]
        except KeyError:
            pass
        # Invoke hooks. NOTE(review): hooks fire even when the session id
        # was unknown.
        invoke_hooks(self.hooks, "session_destroyed", session_id)
    def get_next_revision(self, session_id, revision, delta):
        """
        Determine the next revision number for a given session id, revision
        and delta.
        In case the client is up-to-date, this method will block until the next
        revision is available.
        :param int session_id: Session identifier
        :param int revision: Client revision number
        :param int delta: Client revision delta (old client version number)
        :return: Next revision number
        :rtype: int
        """
        session = self.sessions[session_id]
        session.state = State.connected
        if delta == revision:
            # Client is up to date: remember its revision (never decrement)
            # and block until `update` publishes the next revision.
            session.revision = max(session.revision, revision)
            # Wait for next revision to become ready.
            self.next_revision_available.wait()
        return self.revision
    def get_databases(self, session_id, revision, delta):
        """
        Return (new, old) database collections. `old` is None when the
        client has no previous state (delta == 0); otherwise two revision
        snapshots are returned for diffing.
        """
        if delta == 0:
            new = self.server.databases
            old = None
        else:
            new = self.server.databases(revision)
            old = self.server.databases(delta)
        return new, old
    def get_containers(self, session_id, database_id, revision, delta):
        """
        Return (new, old) container collections of one database. `old` is
        None when the client has no previous state (delta == 0).
        """
        if delta == 0:
            new = self.server \
                .databases[database_id] \
                .containers
            old = None
        else:
            new = self.server \
                .databases[database_id] \
                .containers(revision)
            old = self.server \
                .databases[database_id] \
                .containers(delta)
        return new, old
    def get_container_items(self, session_id, database_id, container_id,
                            revision, delta):
        """
        Return (new, old) item collections of one container. `old` is None
        when the client has no previous state (delta == 0).
        """
        if delta == 0:
            new = self.server \
                .databases[database_id] \
                .containers[container_id] \
                .container_items
            old = None
        else:
            new = self.server \
                .databases[database_id] \
                .containers[container_id] \
                .container_items(revision)
            old = self.server \
                .databases[database_id] \
                .containers[container_id] \
                .container_items(delta)
        return new, old
    def get_items(self, session_id, database_id, revision, delta):
        """
        Return (new, old) item collections of one database. `old` is None
        when the client has no previous state (delta == 0).
        """
        if delta == 0:
            new = self.server \
                .databases[database_id] \
                .items
            old = None
        else:
            new = self.server \
                .databases[database_id] \
                .items(revision)
            old = self.server \
                .databases[database_id] \
                .items(delta)
        return new, old
    def get_item(self, session_id, database_id, item_id, byte_range=None):
        """
        Stream one item's data: look up the session and item, update the
        statistics counters, delegate to `get_item_data` and wrap the data
        in a generator that tracks the session's streaming state.
        :return: Tuple of (data generator, mimetype, size).
        """
        def _inner(data):
            # Change state to streaming
            session.state = State.streaming
            try:
                # Yield data either whole or chunk-by-chunk.
                # (`basestring` is Python 2 only.)
                if isinstance(data, basestring):
                    yield data
                else:
                    for chunk in data:
                        yield chunk
            finally:
                # Change state back to connected, even if an exception is
                # raised.
                session.state = State.connected
        session = self.sessions[session_id]
        item = self.server.databases[database_id].items[item_id]
        # Increment counter for statistics. Make a distinction between requests
        # with a byte range (play-pause) and ones without.
        session.increment_counter("items")
        if byte_range is None:
            session.increment_counter("items_unique")
        data, mimetype, size = self.get_item_data(session, item, byte_range)
        return _inner(data), mimetype, size
    def get_artwork(self, session_id, database_id, item_id):
        """
        Return artwork for one item via `get_artwork_data`, updating the
        per-session "artworks" statistics counter.
        """
        session = self.sessions[session_id]
        item = self.server.databases[database_id].items[item_id]
        # Increment counter for statistics
        session.increment_counter("artworks")
        return self.get_artwork_data(session, item)
    def get_item_data(self, session, item, byte_range=None):
        """
        Fetch the requested item. The result can be an iterator, file
        descriptor, or just raw bytes. Optionally, a begin and/or end range can
        be specified.
        The result should be a tuple, of the form (data, mimetype, size). The
        data can be an iterator, file descriptor or raw bytes. In case a range
        is requested, add a fourth tuple item, length. The length should be the
        size of the requested data that is being returned.
        :param Session session: Client session
        :param Item item: Requested item.
        :param tuple byte_range: Optional byte range to return a part of the
            file.
        :return: File descriptor, iterator or raw bytes.
        """
        raise NotImplementedError("Needs to be overridden.")
    def get_artwork_data(self, session, item):
        """
        Fetch artwork for the requested item.
        The result should be a tuple, of the form (data, mimetype, size). The
        data can be an iterator, file descriptor or raw bytes.
        Note: this method requires `Provider.supports_artwork = True`
        :param Session session: Client session
        :param Item item: Requested item.
        :return: File descriptor, iterator or raw bytes.
        """
        raise NotImplementedError("Needs to be overridden.")
|
basilfx/flask-daapserver
|
daapserver/provider.py
|
LocalFileProvider.get_item_data
|
python
|
def get_item_data(self, session, item, byte_range=None):
    """
    Return the item's file contents as (data, mimetype, size).

    Assumes `item.file_name` points to the file on disk. When a byte
    range is given, only that part of the file is returned; the reported
    size is always the full `item.file_size`.

    :param Session session: Client session (unused here).
    :param Item item: Requested item.
    :param tuple byte_range: Optional (begin, end) byte range.
    :return: Tuple of (file-like object, mimetype, size).
    """
    # Parse byte range
    if byte_range is not None:
        begin, end = parse_byte_range(byte_range, max_byte=item.file_size)
    else:
        begin, end = 0, item.file_size
    # Open read-only: the previous "rb+" mode required write permission and
    # failed on read-only media files.
    fp = open(item.file_name, "rb")
    if begin:
        fp.seek(begin)
    if end < item.file_size:
        # A partial range was requested: read exactly the requested slice
        # and close the file (the old code leaked `fp` on this path, and
        # served the *whole* file for ranges starting at byte zero).
        try:
            data = fp.read(end - begin)
        finally:
            fp.close()
        return cStringIO.StringIO(data), item.file_type, item.file_size
    # Full tail (or whole file): hand back the file pointer itself so the
    # data is streamed instead of loaded into memory.
    return fp, item.file_type, item.file_size
|
Return a file pointer to the item file. Assumes `item.file_name` points
to the file on disk.
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/daapserver/provider.py#L356-L382
|
[
"def parse_byte_range(byte_range, min_byte=0, max_byte=sys.maxint):\n \"\"\"\n Parse and validate a byte range. A byte range is a tuple of (begin, end)\n indices. `begin' should be smaller than `end', and both should fall within\n the `min_byte' and `max_byte'.\n\n In case of a violation, a `ValueError` is raised.\n \"\"\"\n\n if not byte_range:\n return min_byte, max_byte\n\n begin = byte_range[0] or min_byte\n end = byte_range[1] or max_byte\n\n if end < begin:\n raise ValueError(\"End before begin\")\n\n if begin < min_byte:\n raise ValueError(\"Begin smaller than min\")\n\n if end > max_byte:\n raise ValueError(\"End larger than max\")\n\n return begin, end\n"
] |
class LocalFileProvider(Provider):
    """
    Minimal provider implementation that streams items and artwork
    straight from the local disk.
    """
    supports_artwork = True
    def get_artwork_data(self, session, item):
        """
        Open the artwork file referenced by `item.album_art` and return it
        as (file pointer, mimetype, size); mimetype and size are unknown
        and reported as None.
        """
        return open(item.album_art, "rb+"), None, None
|
basilfx/flask-daapserver
|
utils/benchmark_store.py
|
parse_arguments
|
python
|
def parse_arguments():
    """
    Build the command-line parser and parse `sys.argv`.

    :return: Tuple of (parsed namespace, parser), so callers can reuse the
        parser for error reporting.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-n", "--number", action="store", default=1000000, type=int,
        help="number of items")
    parser.add_argument(
        "-p", "--pause", action="store_true", help="pause after execution")
    return parser.parse_args(), parser
|
Parse commandline arguments.
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/utils/benchmark_store.py#L9-L24
| null |
from six.moves import xrange
from daapserver.revision import RevisionStore
import argparse
import sys
def main():
    """
    Run a benchmark for N items. If N is not specified, take 1,000,000 for N.
    """
    # Parse arguments and configure application instance.
    arguments, parser = parse_arguments()
    # Start iterating
    store = RevisionStore()
    sys.stdout.write("Iterating over %d items.\n" % arguments.number)
    for i in xrange(arguments.number):
        # Keys cycle through the letters A-Z; values are small lists of
        # repeated characters so store sizes vary per iteration.
        key = chr(65 + (i % 26))
        value = [key * (i % 26), key * (i % 13), key * (i % 5)]
        store.add(key, value)
    # Wait for an enter (useful for inspecting memory usage before exit).
    if arguments.pause:
        sys.stdout.write("Done!")
        sys.stdin.readline()
# E.g. `python benchmark_store.py [-n <items>] [-p]`
if __name__ == "__main__":
sys.exit(main())
|
basilfx/flask-daapserver
|
utils/benchmark_store.py
|
main
|
python
|
def main():
    """
    Benchmark `RevisionStore.add` with the requested number of items.
    """
    arguments, parser = parse_arguments()
    store = RevisionStore()
    sys.stdout.write("Iterating over %d items.\n" % arguments.number)
    for i in xrange(arguments.number):
        # Keys cycle through A-Z; values are small lists of repeated chars.
        key = chr(65 + (i % 26))
        store.add(key, [key * (i % 26), key * (i % 13), key * (i % 5)])
    # Optionally block so memory usage can be inspected before exit.
    if arguments.pause:
        sys.stdout.write("Done!")
        sys.stdin.readline()
|
Run a benchmark for N items. If N is not specified, take 1,000,000 for N.
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/utils/benchmark_store.py#L27-L48
|
[
"def parse_arguments():\n \"\"\"\n Parse commandline arguments.\n \"\"\"\n\n parser = argparse.ArgumentParser()\n\n # Add options\n parser.add_argument(\n \"-n\", \"--number\", action=\"store\", default=1000000, type=int,\n help=\"number of items\")\n parser.add_argument(\n \"-p\", \"--pause\", action=\"store_true\", help=\"pause after execution\")\n\n # Parse command line\n return parser.parse_args(), parser\n"
] |
from six.moves import xrange
from daapserver.revision import RevisionStore
import argparse
import sys
def parse_arguments():
    """
    Parse commandline arguments.

    :return: Tuple of (parsed namespace, parser instance).
    """
    parser = argparse.ArgumentParser()
    # Add options
    parser.add_argument(
        "-n", "--number", action="store", default=1000000, type=int,
        help="number of items")
    parser.add_argument(
        "-p", "--pause", action="store_true", help="pause after execution")
    # Parse command line
    return parser.parse_args(), parser
# E.g. `python benchmark_store.py [-n <items>] [-p]`
if __name__ == "__main__":
sys.exit(main())
|
basilfx/flask-daapserver
|
utils/transformer.py
|
install_new_pipeline
|
python
|
def install_new_pipeline():
    """
    Monkey-patch `Pipeline.create_pipeline` so that every created pipeline
    gets a `DAAPObjectTransformer` inserted right after the first stage.
    """
    original = Pipeline.create_pipeline

    def patched(context, *args, **kwargs):
        pipeline = original(context, *args, **kwargs)
        pipeline.insert(1, DAAPObjectTransformer(context))
        return pipeline

    Pipeline.create_pipeline = patched
|
Install above transformer into the existing pipeline creator.
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/utils/transformer.py#L39-L51
| null |
from Cython.Compiler import Pipeline, Visitor, ExprNodes, StringEncoding
import imp
import os
# Load the DAAP data. Cannot use normal import because setup.py will install
# dependencies after this file is imported.
daap_data = imp.load_source("daap_data", os.path.join(
os.path.dirname(__file__), "../daapserver/daap_data.py"))
class DAAPObjectTransformer(Visitor.CythonTransform):
    """
    Convert all DAAPObject(x, y) into SpeedyDAAPObject(code[x], type[x], y).
    """
    def visit_CallNode(self, node):
        """
        Rewrite two-argument `DAAPObject(name, value)` calls at compile
        time: the human-readable name is resolved to its DMAP code and
        type via the `daap_data` tables, and the call is retargeted to
        `SpeedyDAAPObject(code, itype, value)`.
        """
        if isinstance(node.function, ExprNodes.NameNode) and \
                node.function.name == u"DAAPObject":
            # Make sure we only convert DAAPObject(x, y) calls, nothing more.
            if len(node.args) == 2:
                # Look up the 4-char DMAP code and its value type.
                code = daap_data.dmap_names[node.args[0].value]
                itype = daap_data.dmap_code_types[code][1]
                node.function.name = self.context.intern_ustring(
                    u"SpeedyDAAPObject")
                # Replace the name argument with the code literal and
                # prepend the integer type argument.
                node.args[0] = ExprNodes.StringNode(
                    node.pos, value=StringEncoding.BytesLiteral(code))
                node.args.insert(1, ExprNodes.IntNode(
                    node.pos, value=str(itype)))
        # Visit method body.
        self.visitchildren(node)
        return node
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.