repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
elmotec/massedit
|
massedit.py
|
MassEdit.set_code_exprs
|
python
|
def set_code_exprs(self, codes):
self.code_objs = dict()
self._codes = []
for code in codes:
self.append_code_expr(code)
|
Convenience: sets all the code expressions at once.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L310-L315
|
[
"def append_code_expr(self, code):\n \"\"\"Compile argument and adds it to the list of code objects.\"\"\"\n # expects a string.\n if isinstance(code, str) and not isinstance(code, unicode):\n code = unicode(code)\n if not isinstance(code, unicode):\n raise TypeError(\"string expected\")\n log.debug(\"compiling code %s...\", code)\n try:\n code_obj = compile(code, '<string>', 'eval')\n self.code_objs[code] = code_obj\n except SyntaxError as syntax_err:\n log.error(\"cannot compile %s: %s\", code, syntax_err)\n raise\n log.debug(\"compiled code %s\", code)\n"
] |
class MassEdit(object):
"""Mass edit lines of files."""
def __init__(self, **kwds):
"""Initialize MassEdit object.
Args:
- code (byte code object): code to execute on input file.
- function (str or callable): function to call on input file.
- module (str): module name where to find the function.
- executable (str): executable file name to execute on input file.
- dry_run (bool): skip actual modification of input file if True.
"""
self.code_objs = dict()
self._codes = []
self._functions = []
self._executables = []
self.dry_run = None
self.encoding = 'utf-8'
self.newline = None
if 'module' in kwds:
self.import_module(kwds['module'])
if 'code' in kwds:
self.append_code_expr(kwds['code'])
if 'function' in kwds:
self.append_function(kwds['function'])
if 'executable' in kwds:
self.append_executable(kwds['executable'])
if 'dry_run' in kwds:
self.dry_run = kwds['dry_run']
if 'encoding' in kwds:
self.encoding = kwds['encoding']
if 'newline' in kwds:
self.newline = kwds['newline']
@staticmethod
def import_module(module): # pylint: disable=R0201
"""Import module that are needed for the code expr to compile.
Argument:
module (str or list): module(s) to import.
"""
if isinstance(module, list):
all_modules = module
else:
all_modules = [module]
for mod in all_modules:
globals()[mod] = __import__(mod.strip())
@staticmethod
def __edit_line(line, code, code_obj): # pylint: disable=R0201
"""Edit a line with one code object built in the ctor."""
try:
# pylint: disable=eval-used
result = eval(code_obj, globals(), locals())
except TypeError as ex:
log.error("failed to execute %s: %s", code, ex)
raise
if result is None:
log.error("cannot process line '%s' with %s", line, code)
raise RuntimeError('failed to process line')
elif isinstance(result, list) or isinstance(result, tuple):
line = unicode(' '.join([unicode(res_element)
for res_element in result]))
else:
line = unicode(result)
return line
def edit_line(self, line):
"""Edit a single line using the code expression."""
for code, code_obj in self.code_objs.items():
line = self.__edit_line(line, code, code_obj)
return line
def edit_content(self, original_lines, file_name):
"""Processes a file contents.
First processes the contents line by line applying the registered
expressions, then process the resulting contents using the
registered functions.
Arguments:
original_lines (list of str): file content.
file_name (str): name of the file.
"""
lines = [self.edit_line(line) for line in original_lines]
for function in self._functions:
try:
lines = list(function(lines, file_name))
except UnicodeDecodeError as err:
log.error('failed to process %s: %s', file_name, err)
return lines
except Exception as err:
log.error("failed to process %s with code %s: %s",
file_name, function, err)
raise # Let the exception be handled at a higher level.
return lines
def edit_file(self, file_name):
"""Edit file in place, returns a list of modifications (unified diff).
Arguments:
file_name (str, unicode): The name of the file.
"""
with io.open(file_name, "r", encoding=self.encoding) as from_file:
try:
from_lines = from_file.readlines()
except UnicodeDecodeError as err:
log.error("encoding error (see --encoding): %s", err)
raise
if self._executables:
nb_execs = len(self._executables)
if nb_execs > 1:
log.warn("found %d executables. Will use first one", nb_execs)
exec_list = self._executables[0].split()
exec_list.append(file_name)
try:
log.info("running %s...", " ".join(exec_list))
output = subprocess.check_output(exec_list,
universal_newlines=True)
except Exception as err:
log.error("failed to execute %s: %s", " ".join(exec_list), err)
raise # Let the exception be handled at a higher level.
to_lines = output.split(unicode("\n"))
else:
to_lines = from_lines
# unified_diff wants structure of known length. Convert to a list.
to_lines = list(self.edit_content(to_lines, file_name))
diffs = difflib.unified_diff(from_lines, to_lines,
fromfile=file_name, tofile='<new>')
if not self.dry_run:
bak_file_name = file_name + ".bak"
if os.path.exists(bak_file_name):
msg = "{} already exists".format(bak_file_name)
if sys.version_info < (3, 3):
raise OSError(msg)
else:
# noinspection PyCompatibility
# pylint: disable=undefined-variable
raise FileExistsError(msg)
try:
os.rename(file_name, bak_file_name)
with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:
new.writelines(to_lines)
# Keeps mode of original file.
shutil.copymode(bak_file_name, file_name)
except Exception as err:
log.error("failed to write output to %s: %s", file_name, err)
# Try to recover...
try:
os.rename(bak_file_name, file_name)
except OSError as err:
log.error("failed to restore %s from %s: %s",
file_name, bak_file_name, err)
raise
try:
os.unlink(bak_file_name)
except OSError as err:
log.warning("failed to remove backup %s: %s",
bak_file_name, err)
return list(diffs)
def append_code_expr(self, code):
"""Compile argument and adds it to the list of code objects."""
# expects a string.
if isinstance(code, str) and not isinstance(code, unicode):
code = unicode(code)
if not isinstance(code, unicode):
raise TypeError("string expected")
log.debug("compiling code %s...", code)
try:
code_obj = compile(code, '<string>', 'eval')
self.code_objs[code] = code_obj
except SyntaxError as syntax_err:
log.error("cannot compile %s: %s", code, syntax_err)
raise
log.debug("compiled code %s", code)
def append_function(self, function):
"""Append the function to the list of functions to be called.
If the function is already a callable, use it. If it's a type str
try to interpret it as [module]:?<callable>, load the module
if there is one and retrieve the callable.
Argument:
function (str or callable): function to call on input.
"""
if not hasattr(function, '__call__'):
function = get_function(function)
if not hasattr(function, '__call__'):
raise ValueError("function is expected to be callable")
self._functions.append(function)
log.debug("registered %s", function.__name__)
def append_executable(self, executable):
"""Append san executable os command to the list to be called.
Argument:
executable (str): os callable executable.
"""
if isinstance(executable, str) and not isinstance(executable, unicode):
executable = unicode(executable)
if not isinstance(executable, unicode):
raise TypeError("expected executable name as str, not {}".
format(executable.__class__.__name__))
self._executables.append(executable)
def set_functions(self, functions):
"""Check functions passed as argument and set them to be used."""
for func in functions:
try:
self.append_function(func)
except (ValueError, AttributeError) as ex:
log.error("'%s' is not a callable function: %s", func, ex)
raise
def set_executables(self, executables):
"""Check and set the executables to be used."""
for exc in executables:
self.append_executable(exc)
|
elmotec/massedit
|
massedit.py
|
MassEdit.set_functions
|
python
|
def set_functions(self, functions):
for func in functions:
try:
self.append_function(func)
except (ValueError, AttributeError) as ex:
log.error("'%s' is not a callable function: %s", func, ex)
raise
|
Check functions passed as argument and set them to be used.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L317-L324
|
[
"def append_function(self, function):\n \"\"\"Append the function to the list of functions to be called.\n\n If the function is already a callable, use it. If it's a type str\n try to interpret it as [module]:?<callable>, load the module\n if there is one and retrieve the callable.\n\n Argument:\n function (str or callable): function to call on input.\n\n \"\"\"\n if not hasattr(function, '__call__'):\n function = get_function(function)\n if not hasattr(function, '__call__'):\n raise ValueError(\"function is expected to be callable\")\n self._functions.append(function)\n log.debug(\"registered %s\", function.__name__)\n"
] |
class MassEdit(object):
"""Mass edit lines of files."""
def __init__(self, **kwds):
"""Initialize MassEdit object.
Args:
- code (byte code object): code to execute on input file.
- function (str or callable): function to call on input file.
- module (str): module name where to find the function.
- executable (str): executable file name to execute on input file.
- dry_run (bool): skip actual modification of input file if True.
"""
self.code_objs = dict()
self._codes = []
self._functions = []
self._executables = []
self.dry_run = None
self.encoding = 'utf-8'
self.newline = None
if 'module' in kwds:
self.import_module(kwds['module'])
if 'code' in kwds:
self.append_code_expr(kwds['code'])
if 'function' in kwds:
self.append_function(kwds['function'])
if 'executable' in kwds:
self.append_executable(kwds['executable'])
if 'dry_run' in kwds:
self.dry_run = kwds['dry_run']
if 'encoding' in kwds:
self.encoding = kwds['encoding']
if 'newline' in kwds:
self.newline = kwds['newline']
@staticmethod
def import_module(module): # pylint: disable=R0201
"""Import module that are needed for the code expr to compile.
Argument:
module (str or list): module(s) to import.
"""
if isinstance(module, list):
all_modules = module
else:
all_modules = [module]
for mod in all_modules:
globals()[mod] = __import__(mod.strip())
@staticmethod
def __edit_line(line, code, code_obj): # pylint: disable=R0201
"""Edit a line with one code object built in the ctor."""
try:
# pylint: disable=eval-used
result = eval(code_obj, globals(), locals())
except TypeError as ex:
log.error("failed to execute %s: %s", code, ex)
raise
if result is None:
log.error("cannot process line '%s' with %s", line, code)
raise RuntimeError('failed to process line')
elif isinstance(result, list) or isinstance(result, tuple):
line = unicode(' '.join([unicode(res_element)
for res_element in result]))
else:
line = unicode(result)
return line
def edit_line(self, line):
"""Edit a single line using the code expression."""
for code, code_obj in self.code_objs.items():
line = self.__edit_line(line, code, code_obj)
return line
def edit_content(self, original_lines, file_name):
"""Processes a file contents.
First processes the contents line by line applying the registered
expressions, then process the resulting contents using the
registered functions.
Arguments:
original_lines (list of str): file content.
file_name (str): name of the file.
"""
lines = [self.edit_line(line) for line in original_lines]
for function in self._functions:
try:
lines = list(function(lines, file_name))
except UnicodeDecodeError as err:
log.error('failed to process %s: %s', file_name, err)
return lines
except Exception as err:
log.error("failed to process %s with code %s: %s",
file_name, function, err)
raise # Let the exception be handled at a higher level.
return lines
def edit_file(self, file_name):
"""Edit file in place, returns a list of modifications (unified diff).
Arguments:
file_name (str, unicode): The name of the file.
"""
with io.open(file_name, "r", encoding=self.encoding) as from_file:
try:
from_lines = from_file.readlines()
except UnicodeDecodeError as err:
log.error("encoding error (see --encoding): %s", err)
raise
if self._executables:
nb_execs = len(self._executables)
if nb_execs > 1:
log.warn("found %d executables. Will use first one", nb_execs)
exec_list = self._executables[0].split()
exec_list.append(file_name)
try:
log.info("running %s...", " ".join(exec_list))
output = subprocess.check_output(exec_list,
universal_newlines=True)
except Exception as err:
log.error("failed to execute %s: %s", " ".join(exec_list), err)
raise # Let the exception be handled at a higher level.
to_lines = output.split(unicode("\n"))
else:
to_lines = from_lines
# unified_diff wants structure of known length. Convert to a list.
to_lines = list(self.edit_content(to_lines, file_name))
diffs = difflib.unified_diff(from_lines, to_lines,
fromfile=file_name, tofile='<new>')
if not self.dry_run:
bak_file_name = file_name + ".bak"
if os.path.exists(bak_file_name):
msg = "{} already exists".format(bak_file_name)
if sys.version_info < (3, 3):
raise OSError(msg)
else:
# noinspection PyCompatibility
# pylint: disable=undefined-variable
raise FileExistsError(msg)
try:
os.rename(file_name, bak_file_name)
with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:
new.writelines(to_lines)
# Keeps mode of original file.
shutil.copymode(bak_file_name, file_name)
except Exception as err:
log.error("failed to write output to %s: %s", file_name, err)
# Try to recover...
try:
os.rename(bak_file_name, file_name)
except OSError as err:
log.error("failed to restore %s from %s: %s",
file_name, bak_file_name, err)
raise
try:
os.unlink(bak_file_name)
except OSError as err:
log.warning("failed to remove backup %s: %s",
bak_file_name, err)
return list(diffs)
def append_code_expr(self, code):
"""Compile argument and adds it to the list of code objects."""
# expects a string.
if isinstance(code, str) and not isinstance(code, unicode):
code = unicode(code)
if not isinstance(code, unicode):
raise TypeError("string expected")
log.debug("compiling code %s...", code)
try:
code_obj = compile(code, '<string>', 'eval')
self.code_objs[code] = code_obj
except SyntaxError as syntax_err:
log.error("cannot compile %s: %s", code, syntax_err)
raise
log.debug("compiled code %s", code)
def append_function(self, function):
"""Append the function to the list of functions to be called.
If the function is already a callable, use it. If it's a type str
try to interpret it as [module]:?<callable>, load the module
if there is one and retrieve the callable.
Argument:
function (str or callable): function to call on input.
"""
if not hasattr(function, '__call__'):
function = get_function(function)
if not hasattr(function, '__call__'):
raise ValueError("function is expected to be callable")
self._functions.append(function)
log.debug("registered %s", function.__name__)
def append_executable(self, executable):
"""Append san executable os command to the list to be called.
Argument:
executable (str): os callable executable.
"""
if isinstance(executable, str) and not isinstance(executable, unicode):
executable = unicode(executable)
if not isinstance(executable, unicode):
raise TypeError("expected executable name as str, not {}".
format(executable.__class__.__name__))
self._executables.append(executable)
def set_code_exprs(self, codes):
"""Convenience: sets all the code expressions at once."""
self.code_objs = dict()
self._codes = []
for code in codes:
self.append_code_expr(code)
def set_executables(self, executables):
"""Check and set the executables to be used."""
for exc in executables:
self.append_executable(exc)
|
ChristianKuehnel/btlewrap
|
btlewrap/pygatt.py
|
wrap_exception
|
python
|
def wrap_exception(func: Callable) -> Callable:
try:
# only do the wrapping if pygatt is installed.
# otherwise it's pointless anyway
from pygatt.backends.bgapi.exceptions import BGAPIError
from pygatt.exceptions import NotConnectedError
except ImportError:
return func
def _func_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except BGAPIError as exception:
raise BluetoothBackendException() from exception
except NotConnectedError as exception:
raise BluetoothBackendException() from exception
return _func_wrapper
|
Decorator to wrap pygatt exceptions into BluetoothBackendException.
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/pygatt.py#L9-L27
| null |
"""Bluetooth backend for Blue Giga based bluetooth devices.
This backend uses the pygatt API: https://github.com/peplin/pygatt
"""
from typing import Callable
from btlewrap.base import AbstractBackend, BluetoothBackendException
class PygattBackend(AbstractBackend):
"""Bluetooth backend for Blue Giga based bluetooth devices."""
@wrap_exception
def __init__(self, adapter: str = None, address_type='public'):
"""Create a new instance.
Note: the parameter "adapter" is ignored, pygatt detects the right USB port automagically.
"""
super(PygattBackend, self).__init__(adapter)
self.check_backend()
import pygatt
self._adapter = pygatt.BGAPIBackend()
self._adapter.start()
self._device = None
self._address_type = address_type
def __del__(self):
if self._adapter is not None:
self._adapter.stop()
@wrap_exception
def connect(self, mac: str):
"""Connect to a device."""
import pygatt
address_type = pygatt.BLEAddressType.public
if self._address_type == 'random':
address_type = pygatt.BLEAddressType.random
self._device = self._adapter.connect(mac, address_type=address_type)
def is_connected(self) -> bool:
"""Check if connected to a device."""
return self._device is not None
@wrap_exception
def disconnect(self):
"""Disconnect from a device."""
if self.is_connected():
self._device.disconnect()
self._device = None
@wrap_exception
def read_handle(self, handle: int) -> bytes:
"""Read a handle from the device."""
if not self.is_connected():
raise BluetoothBackendException('Not connected to device!')
return self._device.char_read_handle(handle)
@wrap_exception
def write_handle(self, handle: int, value: bytes):
"""Write a handle to the device."""
if not self.is_connected():
raise BluetoothBackendException('Not connected to device!')
self._device.char_write_handle(handle, value, True)
return True
@staticmethod
def check_backend() -> bool:
"""Check if the backend is available."""
try:
import pygatt # noqa: F401 # pylint: disable=unused-import
return True
except ImportError:
return False
|
ChristianKuehnel/btlewrap
|
btlewrap/pygatt.py
|
PygattBackend.connect
|
python
|
def connect(self, mac: str):
import pygatt
address_type = pygatt.BLEAddressType.public
if self._address_type == 'random':
address_type = pygatt.BLEAddressType.random
self._device = self._adapter.connect(mac, address_type=address_type)
|
Connect to a device.
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/pygatt.py#L53-L60
| null |
class PygattBackend(AbstractBackend):
"""Bluetooth backend for Blue Giga based bluetooth devices."""
@wrap_exception
def __init__(self, adapter: str = None, address_type='public'):
"""Create a new instance.
Note: the parameter "adapter" is ignored, pygatt detects the right USB port automagically.
"""
super(PygattBackend, self).__init__(adapter)
self.check_backend()
import pygatt
self._adapter = pygatt.BGAPIBackend()
self._adapter.start()
self._device = None
self._address_type = address_type
def __del__(self):
if self._adapter is not None:
self._adapter.stop()
@wrap_exception
def is_connected(self) -> bool:
"""Check if connected to a device."""
return self._device is not None
@wrap_exception
def disconnect(self):
"""Disconnect from a device."""
if self.is_connected():
self._device.disconnect()
self._device = None
@wrap_exception
def read_handle(self, handle: int) -> bytes:
"""Read a handle from the device."""
if not self.is_connected():
raise BluetoothBackendException('Not connected to device!')
return self._device.char_read_handle(handle)
@wrap_exception
def write_handle(self, handle: int, value: bytes):
"""Write a handle to the device."""
if not self.is_connected():
raise BluetoothBackendException('Not connected to device!')
self._device.char_write_handle(handle, value, True)
return True
@staticmethod
def check_backend() -> bool:
"""Check if the backend is available."""
try:
import pygatt # noqa: F401 # pylint: disable=unused-import
return True
except ImportError:
return False
|
ChristianKuehnel/btlewrap
|
btlewrap/pygatt.py
|
PygattBackend.read_handle
|
python
|
def read_handle(self, handle: int) -> bytes:
if not self.is_connected():
raise BluetoothBackendException('Not connected to device!')
return self._device.char_read_handle(handle)
|
Read a handle from the device.
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/pygatt.py#L74-L78
|
[
"def is_connected(self) -> bool:\n \"\"\"Check if connected to a device.\"\"\"\n return self._device is not None\n"
] |
class PygattBackend(AbstractBackend):
"""Bluetooth backend for Blue Giga based bluetooth devices."""
@wrap_exception
def __init__(self, adapter: str = None, address_type='public'):
"""Create a new instance.
Note: the parameter "adapter" is ignored, pygatt detects the right USB port automagically.
"""
super(PygattBackend, self).__init__(adapter)
self.check_backend()
import pygatt
self._adapter = pygatt.BGAPIBackend()
self._adapter.start()
self._device = None
self._address_type = address_type
def __del__(self):
if self._adapter is not None:
self._adapter.stop()
@wrap_exception
def connect(self, mac: str):
"""Connect to a device."""
import pygatt
address_type = pygatt.BLEAddressType.public
if self._address_type == 'random':
address_type = pygatt.BLEAddressType.random
self._device = self._adapter.connect(mac, address_type=address_type)
def is_connected(self) -> bool:
"""Check if connected to a device."""
return self._device is not None
@wrap_exception
def disconnect(self):
"""Disconnect from a device."""
if self.is_connected():
self._device.disconnect()
self._device = None
@wrap_exception
@wrap_exception
def write_handle(self, handle: int, value: bytes):
"""Write a handle to the device."""
if not self.is_connected():
raise BluetoothBackendException('Not connected to device!')
self._device.char_write_handle(handle, value, True)
return True
@staticmethod
def check_backend() -> bool:
"""Check if the backend is available."""
try:
import pygatt # noqa: F401 # pylint: disable=unused-import
return True
except ImportError:
return False
|
ChristianKuehnel/btlewrap
|
btlewrap/pygatt.py
|
PygattBackend.write_handle
|
python
|
def write_handle(self, handle: int, value: bytes):
if not self.is_connected():
raise BluetoothBackendException('Not connected to device!')
self._device.char_write_handle(handle, value, True)
return True
|
Write a handle to the device.
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/pygatt.py#L81-L86
|
[
"def is_connected(self) -> bool:\n \"\"\"Check if connected to a device.\"\"\"\n return self._device is not None\n"
] |
class PygattBackend(AbstractBackend):
"""Bluetooth backend for Blue Giga based bluetooth devices."""
@wrap_exception
def __init__(self, adapter: str = None, address_type='public'):
"""Create a new instance.
Note: the parameter "adapter" is ignored, pygatt detects the right USB port automagically.
"""
super(PygattBackend, self).__init__(adapter)
self.check_backend()
import pygatt
self._adapter = pygatt.BGAPIBackend()
self._adapter.start()
self._device = None
self._address_type = address_type
def __del__(self):
if self._adapter is not None:
self._adapter.stop()
@wrap_exception
def connect(self, mac: str):
"""Connect to a device."""
import pygatt
address_type = pygatt.BLEAddressType.public
if self._address_type == 'random':
address_type = pygatt.BLEAddressType.random
self._device = self._adapter.connect(mac, address_type=address_type)
def is_connected(self) -> bool:
"""Check if connected to a device."""
return self._device is not None
@wrap_exception
def disconnect(self):
"""Disconnect from a device."""
if self.is_connected():
self._device.disconnect()
self._device = None
@wrap_exception
def read_handle(self, handle: int) -> bytes:
"""Read a handle from the device."""
if not self.is_connected():
raise BluetoothBackendException('Not connected to device!')
return self._device.char_read_handle(handle)
@wrap_exception
@staticmethod
def check_backend() -> bool:
"""Check if the backend is available."""
try:
import pygatt # noqa: F401 # pylint: disable=unused-import
return True
except ImportError:
return False
|
ChristianKuehnel/btlewrap
|
btlewrap/bluepy.py
|
wrap_exception
|
python
|
def wrap_exception(func: Callable) -> Callable:
try:
# only do the wrapping if bluepy is installed.
# otherwise it's pointless anyway
from bluepy.btle import BTLEException
except ImportError:
return func
def _func_wrapper(*args, **kwargs):
error_count = 0
last_error = None
while error_count < RETRY_LIMIT:
try:
return func(*args, **kwargs)
except BTLEException as exception:
error_count += 1
last_error = exception
time.sleep(RETRY_DELAY)
_LOGGER.debug('Call to %s failed, try %d of %d', func, error_count, RETRY_LIMIT)
raise BluetoothBackendException() from last_error
return _func_wrapper
|
Decorator to wrap BTLEExceptions into BluetoothBackendException.
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/bluepy.py#L13-L35
| null |
"""Backend for Miflora using the bluepy library."""
import re
import logging
import time
from typing import List, Tuple, Callable
from btlewrap.base import AbstractBackend, BluetoothBackendException
_LOGGER = logging.getLogger(__name__)
RETRY_LIMIT = 3
RETRY_DELAY = 0.1
class BluepyBackend(AbstractBackend):
"""Backend for Miflora using the bluepy library."""
def __init__(self, adapter: str = 'hci0', address_type: str = 'public'):
"""Create new instance of the backend."""
super(BluepyBackend, self).__init__(adapter)
self.address_type = address_type
self._peripheral = None
@wrap_exception
def connect(self, mac: str):
"""Connect to a device."""
from bluepy.btle import Peripheral
match_result = re.search(r'hci([\d]+)', self.adapter)
if match_result is None:
raise BluetoothBackendException(
'Invalid pattern "{}" for BLuetooth adpater. '
'Expetected something like "hci0".'.format(self.adapter))
iface = int(match_result.group(1))
self._peripheral = Peripheral(mac, iface=iface, addrType=self.address_type)
@wrap_exception
def disconnect(self):
"""Disconnect from a device if connected."""
if self._peripheral is None:
return
self._peripheral.disconnect()
self._peripheral = None
@wrap_exception
def read_handle(self, handle: int) -> bytes:
"""Read a handle from the device.
You must be connected to do this.
"""
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
return self._peripheral.readCharacteristic(handle)
@wrap_exception
def write_handle(self, handle: int, value: bytes):
"""Write a handle from the device.
You must be connected to do this.
"""
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
return self._peripheral.writeCharacteristic(handle, value, True)
@wrap_exception
def wait_for_notification(self, handle: int, delegate, notification_timeout: float):
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
self.write_handle(handle, self._DATA_MODE_LISTEN)
self._peripheral.withDelegate(delegate)
return self._peripheral.waitForNotifications(notification_timeout)
@staticmethod
def check_backend() -> bool:
"""Check if the backend is available."""
try:
import bluepy.btle # noqa: F401 #pylint: disable=unused-import
return True
except ImportError as importerror:
_LOGGER.error('bluepy not found: %s', str(importerror))
return False
@staticmethod
@wrap_exception
def scan_for_devices(timeout: float) -> List[Tuple[str, str]]:
"""Scan for bluetooth low energy devices.
Note this must be run as root!"""
from bluepy.btle import Scanner
scanner = Scanner()
result = []
for device in scanner.scan(timeout):
result.append((device.addr, device.getValueText(9)))
return result
|
ChristianKuehnel/btlewrap
|
btlewrap/bluepy.py
|
BluepyBackend.connect
|
python
|
def connect(self, mac: str):
from bluepy.btle import Peripheral
match_result = re.search(r'hci([\d]+)', self.adapter)
if match_result is None:
raise BluetoothBackendException(
'Invalid pattern "{}" for BLuetooth adpater. '
'Expetected something like "hci0".'.format(self.adapter))
iface = int(match_result.group(1))
self._peripheral = Peripheral(mac, iface=iface, addrType=self.address_type)
|
Connect to a device.
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/bluepy.py#L48-L57
| null |
class BluepyBackend(AbstractBackend):
"""Backend for Miflora using the bluepy library."""
def __init__(self, adapter: str = 'hci0', address_type: str = 'public'):
"""Create new instance of the backend."""
super(BluepyBackend, self).__init__(adapter)
self.address_type = address_type
self._peripheral = None
@wrap_exception
@wrap_exception
def disconnect(self):
"""Disconnect from a device if connected."""
if self._peripheral is None:
return
self._peripheral.disconnect()
self._peripheral = None
@wrap_exception
def read_handle(self, handle: int) -> bytes:
"""Read a handle from the device.
You must be connected to do this.
"""
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
return self._peripheral.readCharacteristic(handle)
@wrap_exception
def write_handle(self, handle: int, value: bytes):
"""Write a handle from the device.
You must be connected to do this.
"""
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
return self._peripheral.writeCharacteristic(handle, value, True)
@wrap_exception
def wait_for_notification(self, handle: int, delegate, notification_timeout: float):
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
self.write_handle(handle, self._DATA_MODE_LISTEN)
self._peripheral.withDelegate(delegate)
return self._peripheral.waitForNotifications(notification_timeout)
@staticmethod
def check_backend() -> bool:
"""Check if the backend is available."""
try:
import bluepy.btle # noqa: F401 #pylint: disable=unused-import
return True
except ImportError as importerror:
_LOGGER.error('bluepy not found: %s', str(importerror))
return False
@staticmethod
@wrap_exception
def scan_for_devices(timeout: float) -> List[Tuple[str, str]]:
"""Scan for bluetooth low energy devices.
Note this must be run as root!"""
from bluepy.btle import Scanner
scanner = Scanner()
result = []
for device in scanner.scan(timeout):
result.append((device.addr, device.getValueText(9)))
return result
|
ChristianKuehnel/btlewrap
|
btlewrap/bluepy.py
|
BluepyBackend.read_handle
|
python
|
def read_handle(self, handle: int) -> bytes:
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
return self._peripheral.readCharacteristic(handle)
|
Read a handle from the device.
You must be connected to do this.
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/bluepy.py#L69-L76
| null |
class BluepyBackend(AbstractBackend):
"""Backend for Miflora using the bluepy library."""
def __init__(self, adapter: str = 'hci0', address_type: str = 'public'):
"""Create new instance of the backend."""
super(BluepyBackend, self).__init__(adapter)
self.address_type = address_type
self._peripheral = None
@wrap_exception
def connect(self, mac: str):
"""Connect to a device."""
from bluepy.btle import Peripheral
match_result = re.search(r'hci([\d]+)', self.adapter)
if match_result is None:
raise BluetoothBackendException(
'Invalid pattern "{}" for BLuetooth adpater. '
'Expetected something like "hci0".'.format(self.adapter))
iface = int(match_result.group(1))
self._peripheral = Peripheral(mac, iface=iface, addrType=self.address_type)
@wrap_exception
def disconnect(self):
"""Disconnect from a device if connected."""
if self._peripheral is None:
return
self._peripheral.disconnect()
self._peripheral = None
@wrap_exception
@wrap_exception
def write_handle(self, handle: int, value: bytes):
"""Write a handle from the device.
You must be connected to do this.
"""
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
return self._peripheral.writeCharacteristic(handle, value, True)
@wrap_exception
def wait_for_notification(self, handle: int, delegate, notification_timeout: float):
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
self.write_handle(handle, self._DATA_MODE_LISTEN)
self._peripheral.withDelegate(delegate)
return self._peripheral.waitForNotifications(notification_timeout)
@staticmethod
def check_backend() -> bool:
"""Check if the backend is available."""
try:
import bluepy.btle # noqa: F401 #pylint: disable=unused-import
return True
except ImportError as importerror:
_LOGGER.error('bluepy not found: %s', str(importerror))
return False
@staticmethod
@wrap_exception
def scan_for_devices(timeout: float) -> List[Tuple[str, str]]:
"""Scan for bluetooth low energy devices.
Note this must be run as root!"""
from bluepy.btle import Scanner
scanner = Scanner()
result = []
for device in scanner.scan(timeout):
result.append((device.addr, device.getValueText(9)))
return result
|
ChristianKuehnel/btlewrap
|
btlewrap/bluepy.py
|
BluepyBackend.write_handle
|
python
|
def write_handle(self, handle: int, value: bytes):
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
return self._peripheral.writeCharacteristic(handle, value, True)
|
Write a handle from the device.
You must be connected to do this.
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/bluepy.py#L79-L86
| null |
class BluepyBackend(AbstractBackend):
"""Backend for Miflora using the bluepy library."""
def __init__(self, adapter: str = 'hci0', address_type: str = 'public'):
"""Create new instance of the backend."""
super(BluepyBackend, self).__init__(adapter)
self.address_type = address_type
self._peripheral = None
@wrap_exception
def connect(self, mac: str):
"""Connect to a device."""
from bluepy.btle import Peripheral
match_result = re.search(r'hci([\d]+)', self.adapter)
if match_result is None:
raise BluetoothBackendException(
'Invalid pattern "{}" for BLuetooth adpater. '
'Expetected something like "hci0".'.format(self.adapter))
iface = int(match_result.group(1))
self._peripheral = Peripheral(mac, iface=iface, addrType=self.address_type)
@wrap_exception
def disconnect(self):
"""Disconnect from a device if connected."""
if self._peripheral is None:
return
self._peripheral.disconnect()
self._peripheral = None
@wrap_exception
def read_handle(self, handle: int) -> bytes:
"""Read a handle from the device.
You must be connected to do this.
"""
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
return self._peripheral.readCharacteristic(handle)
@wrap_exception
@wrap_exception
def wait_for_notification(self, handle: int, delegate, notification_timeout: float):
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
self.write_handle(handle, self._DATA_MODE_LISTEN)
self._peripheral.withDelegate(delegate)
return self._peripheral.waitForNotifications(notification_timeout)
@staticmethod
def check_backend() -> bool:
"""Check if the backend is available."""
try:
import bluepy.btle # noqa: F401 #pylint: disable=unused-import
return True
except ImportError as importerror:
_LOGGER.error('bluepy not found: %s', str(importerror))
return False
@staticmethod
@wrap_exception
def scan_for_devices(timeout: float) -> List[Tuple[str, str]]:
"""Scan for bluetooth low energy devices.
Note this must be run as root!"""
from bluepy.btle import Scanner
scanner = Scanner()
result = []
for device in scanner.scan(timeout):
result.append((device.addr, device.getValueText(9)))
return result
|
ChristianKuehnel/btlewrap
|
btlewrap/bluepy.py
|
BluepyBackend.check_backend
|
python
|
def check_backend() -> bool:
try:
import bluepy.btle # noqa: F401 #pylint: disable=unused-import
return True
except ImportError as importerror:
_LOGGER.error('bluepy not found: %s', str(importerror))
return False
|
Check if the backend is available.
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/bluepy.py#L97-L104
| null |
class BluepyBackend(AbstractBackend):
"""Backend for Miflora using the bluepy library."""
def __init__(self, adapter: str = 'hci0', address_type: str = 'public'):
"""Create new instance of the backend."""
super(BluepyBackend, self).__init__(adapter)
self.address_type = address_type
self._peripheral = None
@wrap_exception
def connect(self, mac: str):
"""Connect to a device."""
from bluepy.btle import Peripheral
match_result = re.search(r'hci([\d]+)', self.adapter)
if match_result is None:
raise BluetoothBackendException(
'Invalid pattern "{}" for BLuetooth adpater. '
'Expetected something like "hci0".'.format(self.adapter))
iface = int(match_result.group(1))
self._peripheral = Peripheral(mac, iface=iface, addrType=self.address_type)
@wrap_exception
def disconnect(self):
"""Disconnect from a device if connected."""
if self._peripheral is None:
return
self._peripheral.disconnect()
self._peripheral = None
@wrap_exception
def read_handle(self, handle: int) -> bytes:
"""Read a handle from the device.
You must be connected to do this.
"""
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
return self._peripheral.readCharacteristic(handle)
@wrap_exception
def write_handle(self, handle: int, value: bytes):
"""Write a handle from the device.
You must be connected to do this.
"""
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
return self._peripheral.writeCharacteristic(handle, value, True)
@wrap_exception
def wait_for_notification(self, handle: int, delegate, notification_timeout: float):
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
self.write_handle(handle, self._DATA_MODE_LISTEN)
self._peripheral.withDelegate(delegate)
return self._peripheral.waitForNotifications(notification_timeout)
@staticmethod
@staticmethod
@wrap_exception
def scan_for_devices(timeout: float) -> List[Tuple[str, str]]:
"""Scan for bluetooth low energy devices.
Note this must be run as root!"""
from bluepy.btle import Scanner
scanner = Scanner()
result = []
for device in scanner.scan(timeout):
result.append((device.addr, device.getValueText(9)))
return result
|
ChristianKuehnel/btlewrap
|
btlewrap/bluepy.py
|
BluepyBackend.scan_for_devices
|
python
|
def scan_for_devices(timeout: float) -> List[Tuple[str, str]]:
from bluepy.btle import Scanner
scanner = Scanner()
result = []
for device in scanner.scan(timeout):
result.append((device.addr, device.getValueText(9)))
return result
|
Scan for bluetooth low energy devices.
Note this must be run as root!
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/bluepy.py#L108-L118
| null |
class BluepyBackend(AbstractBackend):
"""Backend for Miflora using the bluepy library."""
def __init__(self, adapter: str = 'hci0', address_type: str = 'public'):
"""Create new instance of the backend."""
super(BluepyBackend, self).__init__(adapter)
self.address_type = address_type
self._peripheral = None
@wrap_exception
def connect(self, mac: str):
"""Connect to a device."""
from bluepy.btle import Peripheral
match_result = re.search(r'hci([\d]+)', self.adapter)
if match_result is None:
raise BluetoothBackendException(
'Invalid pattern "{}" for BLuetooth adpater. '
'Expetected something like "hci0".'.format(self.adapter))
iface = int(match_result.group(1))
self._peripheral = Peripheral(mac, iface=iface, addrType=self.address_type)
@wrap_exception
def disconnect(self):
"""Disconnect from a device if connected."""
if self._peripheral is None:
return
self._peripheral.disconnect()
self._peripheral = None
@wrap_exception
def read_handle(self, handle: int) -> bytes:
"""Read a handle from the device.
You must be connected to do this.
"""
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
return self._peripheral.readCharacteristic(handle)
@wrap_exception
def write_handle(self, handle: int, value: bytes):
"""Write a handle from the device.
You must be connected to do this.
"""
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
return self._peripheral.writeCharacteristic(handle, value, True)
@wrap_exception
def wait_for_notification(self, handle: int, delegate, notification_timeout: float):
if self._peripheral is None:
raise BluetoothBackendException('not connected to backend')
self.write_handle(handle, self._DATA_MODE_LISTEN)
self._peripheral.withDelegate(delegate)
return self._peripheral.waitForNotifications(notification_timeout)
@staticmethod
def check_backend() -> bool:
"""Check if the backend is available."""
try:
import bluepy.btle # noqa: F401 #pylint: disable=unused-import
return True
except ImportError as importerror:
_LOGGER.error('bluepy not found: %s', str(importerror))
return False
@staticmethod
@wrap_exception
|
ChristianKuehnel/btlewrap
|
btlewrap/gatttool.py
|
wrap_exception
|
python
|
def wrap_exception(func: Callable) -> Callable:
def _func_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except IOError as exception:
raise BluetoothBackendException() from exception
return _func_wrapper
|
Wrap all IOErrors to BluetoothBackendException
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/gatttool.py#L19-L27
| null |
"""
Reading from the sensor is handled by the command line tool "gatttool" that
is part of bluez on Linux.
No other operating systems are supported at the moment
"""
from threading import current_thread
import os
import logging
import re
import time
from typing import Callable
from subprocess import Popen, PIPE, TimeoutExpired, signal, call
from btlewrap.base import AbstractBackend, BluetoothBackendException
_LOGGER = logging.getLogger(__name__)
class GatttoolBackend(AbstractBackend):
""" Backend using gatttool."""
# pylint: disable=subprocess-popen-preexec-fn
def __init__(self, adapter: str = 'hci0', retries: int = 3, timeout: float = 20, address_type: str = 'public'):
super(GatttoolBackend, self).__init__(adapter)
self.adapter = adapter
self.retries = retries
self.timeout = timeout
self.address_type = address_type
self._mac = None
def connect(self, mac: str):
"""Connect to sensor.
Connection handling is not required when using gatttool, but we still need the mac
"""
self._mac = mac
def disconnect(self):
"""Disconnect from sensor.
Connection handling is not required when using gatttool.
"""
self._mac = None
def is_connected(self) -> bool:
"""Check if we are connected to the backend."""
return self._mac is not None
@wrap_exception
def write_handle(self, handle: int, value: bytes):
# noqa: C901
# pylint: disable=arguments-differ
"""Read from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
@param: value - value to write to the given handle
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter write_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={}".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.bytes_to_string(value), self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s",
self.timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=self.timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Killed hanging gatttool")
result = result.decode("utf-8").strip(' \n\t')
if "Write Request failed" in result:
raise BluetoothBackendException('Error writing handle to sensor: {}'.format(result))
_LOGGER.debug("Got %s from gatttool", result)
# Parse the output
if "successfully" in result:
_LOGGER.debug(
"Exit write_ble with result (%s)", current_thread())
return True
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
@wrap_exception
def wait_for_notification(self, handle: int, delegate, notification_timeout: float):
"""Listen for characteristics changes from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
a value of 0x0100 is written to register for listening
@param: delegate - gatttool receives the
--listen argument and the delegate object's handleNotification is
called for every returned row
@param: notification_timeout
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter write_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={} --listen".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.bytes_to_string(self._DATA_MODE_LISTEN),
self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s", notification_timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=notification_timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group, because listening always hangs
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Listening stopped forcefully after timeout.")
result = result.decode("utf-8").strip(' \n\t')
if "Write Request failed" in result:
raise BluetoothBackendException('Error writing handle to sensor: {}'.format(result))
_LOGGER.debug("Got %s from gatttool", result)
# Parse the output to determine success
if "successfully" in result:
_LOGGER.debug("Exit write_ble with result (%s)", current_thread())
# extract useful data.
for element in self.extract_notification_payload(result):
delegate.handleNotification(handle, bytes([int(x, 16) for x in element.split()]))
return True
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
@staticmethod
def extract_notification_payload(process_output):
"""
Processes the raw output from Gatttool stripping the first line and the
'Notification handle = 0x000e value: ' from each line
@param: process_output - the raw output from a listen commad of GattTool
which may look like this:
Characteristic value was written successfully
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 30 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 32 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 33 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 31 20 48 3d 32 37 2e 34 00
This method strips the fist line and strips the 'Notification handle = 0x000e value: ' from each line
@returns a processed string only containing the values.
"""
data = []
for element in process_output.splitlines()[1:]:
parts = element.split(": ")
if len(parts) == 2:
data.append(parts[1])
return data
@wrap_exception
def read_handle(self, handle: int) -> bytes:
"""Read from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
@param: timeout - timeout in seconds
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter read_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-read -a {} --adapter={}".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s",
self.timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=self.timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Killed hanging gatttool")
result = result.decode("utf-8").strip(' \n\t')
_LOGGER.debug("Got \"%s\" from gatttool", result)
# Parse the output
if "read failed" in result:
raise BluetoothBackendException("Read error from gatttool: {}".format(result))
res = re.search("( [0-9a-fA-F][0-9a-fA-F])+", result)
if res:
_LOGGER.debug(
"Exit read_ble with result (%s)", current_thread())
return bytes([int(x, 16) for x in res.group(0).split()])
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit read_ble, no data ({})".format(current_thread()))
@staticmethod
def check_backend() -> bool:
"""Check if gatttool is available on the system."""
try:
call('gatttool', stdout=PIPE, stderr=PIPE)
return True
except OSError as os_err:
msg = 'gatttool not found: {}'.format(str(os_err))
_LOGGER.error(msg)
return False
@staticmethod
def byte_to_handle(in_byte: int) -> str:
"""Convert a byte array to a handle string."""
return '0x'+'{:02x}'.format(in_byte).upper()
@staticmethod
def bytes_to_string(raw_data: bytes, prefix: bool = False) -> str:
"""Convert a byte array to a hex string."""
prefix_string = ''
if prefix:
prefix_string = '0x'
suffix = ''.join([format(c, "02x") for c in raw_data])
return prefix_string + suffix.upper()
|
ChristianKuehnel/btlewrap
|
btlewrap/gatttool.py
|
GatttoolBackend.write_handle
|
python
|
def write_handle(self, handle: int, value: bytes):
# noqa: C901
# pylint: disable=arguments-differ
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter write_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={}".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.bytes_to_string(value), self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s",
self.timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=self.timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Killed hanging gatttool")
result = result.decode("utf-8").strip(' \n\t')
if "Write Request failed" in result:
raise BluetoothBackendException('Error writing handle to sensor: {}'.format(result))
_LOGGER.debug("Got %s from gatttool", result)
# Parse the output
if "successfully" in result:
_LOGGER.debug(
"Exit write_ble with result (%s)", current_thread())
return True
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
|
Read from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
@param: value - value to write to the given handle
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/gatttool.py#L62-L116
|
[
"def is_connected(self) -> bool:\n \"\"\"Check if we are connected to the backend.\"\"\"\n return self._mac is not None\n"
] |
class GatttoolBackend(AbstractBackend):
""" Backend using gatttool."""
# pylint: disable=subprocess-popen-preexec-fn
def __init__(self, adapter: str = 'hci0', retries: int = 3, timeout: float = 20, address_type: str = 'public'):
super(GatttoolBackend, self).__init__(adapter)
self.adapter = adapter
self.retries = retries
self.timeout = timeout
self.address_type = address_type
self._mac = None
def connect(self, mac: str):
"""Connect to sensor.
Connection handling is not required when using gatttool, but we still need the mac
"""
self._mac = mac
def disconnect(self):
"""Disconnect from sensor.
Connection handling is not required when using gatttool.
"""
self._mac = None
def is_connected(self) -> bool:
"""Check if we are connected to the backend."""
return self._mac is not None
@wrap_exception
@wrap_exception
def wait_for_notification(self, handle: int, delegate, notification_timeout: float):
"""Listen for characteristics changes from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
a value of 0x0100 is written to register for listening
@param: delegate - gatttool receives the
--listen argument and the delegate object's handleNotification is
called for every returned row
@param: notification_timeout
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter write_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={} --listen".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.bytes_to_string(self._DATA_MODE_LISTEN),
self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s", notification_timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=notification_timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group, because listening always hangs
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Listening stopped forcefully after timeout.")
result = result.decode("utf-8").strip(' \n\t')
if "Write Request failed" in result:
raise BluetoothBackendException('Error writing handle to sensor: {}'.format(result))
_LOGGER.debug("Got %s from gatttool", result)
# Parse the output to determine success
if "successfully" in result:
_LOGGER.debug("Exit write_ble with result (%s)", current_thread())
# extract useful data.
for element in self.extract_notification_payload(result):
delegate.handleNotification(handle, bytes([int(x, 16) for x in element.split()]))
return True
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
@staticmethod
def extract_notification_payload(process_output):
"""
Processes the raw output from Gatttool stripping the first line and the
'Notification handle = 0x000e value: ' from each line
@param: process_output - the raw output from a listen commad of GattTool
which may look like this:
Characteristic value was written successfully
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 30 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 32 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 33 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 31 20 48 3d 32 37 2e 34 00
This method strips the fist line and strips the 'Notification handle = 0x000e value: ' from each line
@returns a processed string only containing the values.
"""
data = []
for element in process_output.splitlines()[1:]:
parts = element.split(": ")
if len(parts) == 2:
data.append(parts[1])
return data
@wrap_exception
def read_handle(self, handle: int) -> bytes:
"""Read from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
@param: timeout - timeout in seconds
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter read_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-read -a {} --adapter={}".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s",
self.timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=self.timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Killed hanging gatttool")
result = result.decode("utf-8").strip(' \n\t')
_LOGGER.debug("Got \"%s\" from gatttool", result)
# Parse the output
if "read failed" in result:
raise BluetoothBackendException("Read error from gatttool: {}".format(result))
res = re.search("( [0-9a-fA-F][0-9a-fA-F])+", result)
if res:
_LOGGER.debug(
"Exit read_ble with result (%s)", current_thread())
return bytes([int(x, 16) for x in res.group(0).split()])
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit read_ble, no data ({})".format(current_thread()))
@staticmethod
def check_backend() -> bool:
"""Check if gatttool is available on the system."""
try:
call('gatttool', stdout=PIPE, stderr=PIPE)
return True
except OSError as os_err:
msg = 'gatttool not found: {}'.format(str(os_err))
_LOGGER.error(msg)
return False
@staticmethod
def byte_to_handle(in_byte: int) -> str:
"""Convert a byte array to a handle string."""
return '0x'+'{:02x}'.format(in_byte).upper()
@staticmethod
def bytes_to_string(raw_data: bytes, prefix: bool = False) -> str:
"""Convert a byte array to a hex string."""
prefix_string = ''
if prefix:
prefix_string = '0x'
suffix = ''.join([format(c, "02x") for c in raw_data])
return prefix_string + suffix.upper()
|
ChristianKuehnel/btlewrap
|
btlewrap/gatttool.py
|
GatttoolBackend.wait_for_notification
|
python
|
def wait_for_notification(self, handle: int, delegate, notification_timeout: float):
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter write_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={} --listen".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.bytes_to_string(self._DATA_MODE_LISTEN),
self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s", notification_timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=notification_timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group, because listening always hangs
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Listening stopped forcefully after timeout.")
result = result.decode("utf-8").strip(' \n\t')
if "Write Request failed" in result:
raise BluetoothBackendException('Error writing handle to sensor: {}'.format(result))
_LOGGER.debug("Got %s from gatttool", result)
# Parse the output to determine success
if "successfully" in result:
_LOGGER.debug("Exit write_ble with result (%s)", current_thread())
# extract useful data.
for element in self.extract_notification_payload(result):
delegate.handleNotification(handle, bytes([int(x, 16) for x in element.split()]))
return True
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
|
Listen for characteristics changes from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
a value of 0x0100 is written to register for listening
@param: delegate - gatttool receives the
--listen argument and the delegate object's handleNotification is
called for every returned row
@param: notification_timeout
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/gatttool.py#L119-L176
|
[
"def is_connected(self) -> bool:\n \"\"\"Check if we are connected to the backend.\"\"\"\n return self._mac is not None\n"
] |
class GatttoolBackend(AbstractBackend):
""" Backend using gatttool."""
# pylint: disable=subprocess-popen-preexec-fn
def __init__(self, adapter: str = 'hci0', retries: int = 3, timeout: float = 20, address_type: str = 'public'):
super(GatttoolBackend, self).__init__(adapter)
self.adapter = adapter
self.retries = retries
self.timeout = timeout
self.address_type = address_type
self._mac = None
def connect(self, mac: str):
"""Connect to sensor.
Connection handling is not required when using gatttool, but we still need the mac
"""
self._mac = mac
def disconnect(self):
"""Disconnect from sensor.
Connection handling is not required when using gatttool.
"""
self._mac = None
def is_connected(self) -> bool:
"""Check if we are connected to the backend."""
return self._mac is not None
@wrap_exception
def write_handle(self, handle: int, value: bytes):
# noqa: C901
# pylint: disable=arguments-differ
"""Read from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
@param: value - value to write to the given handle
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter write_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={}".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.bytes_to_string(value), self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s",
self.timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=self.timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Killed hanging gatttool")
result = result.decode("utf-8").strip(' \n\t')
if "Write Request failed" in result:
raise BluetoothBackendException('Error writing handle to sensor: {}'.format(result))
_LOGGER.debug("Got %s from gatttool", result)
# Parse the output
if "successfully" in result:
_LOGGER.debug(
"Exit write_ble with result (%s)", current_thread())
return True
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
@wrap_exception
@staticmethod
def extract_notification_payload(process_output):
"""
Processes the raw output from Gatttool stripping the first line and the
'Notification handle = 0x000e value: ' from each line
@param: process_output - the raw output from a listen commad of GattTool
which may look like this:
Characteristic value was written successfully
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 30 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 32 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 33 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 31 20 48 3d 32 37 2e 34 00
This method strips the fist line and strips the 'Notification handle = 0x000e value: ' from each line
@returns a processed string only containing the values.
"""
data = []
for element in process_output.splitlines()[1:]:
parts = element.split(": ")
if len(parts) == 2:
data.append(parts[1])
return data
@wrap_exception
def read_handle(self, handle: int) -> bytes:
"""Read from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
@param: timeout - timeout in seconds
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter read_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-read -a {} --adapter={}".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s",
self.timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=self.timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Killed hanging gatttool")
result = result.decode("utf-8").strip(' \n\t')
_LOGGER.debug("Got \"%s\" from gatttool", result)
# Parse the output
if "read failed" in result:
raise BluetoothBackendException("Read error from gatttool: {}".format(result))
res = re.search("( [0-9a-fA-F][0-9a-fA-F])+", result)
if res:
_LOGGER.debug(
"Exit read_ble with result (%s)", current_thread())
return bytes([int(x, 16) for x in res.group(0).split()])
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit read_ble, no data ({})".format(current_thread()))
@staticmethod
def check_backend() -> bool:
"""Check if gatttool is available on the system."""
try:
call('gatttool', stdout=PIPE, stderr=PIPE)
return True
except OSError as os_err:
msg = 'gatttool not found: {}'.format(str(os_err))
_LOGGER.error(msg)
return False
@staticmethod
def byte_to_handle(in_byte: int) -> str:
"""Convert a byte array to a handle string."""
return '0x'+'{:02x}'.format(in_byte).upper()
@staticmethod
def bytes_to_string(raw_data: bytes, prefix: bool = False) -> str:
"""Convert a byte array to a hex string."""
prefix_string = ''
if prefix:
prefix_string = '0x'
suffix = ''.join([format(c, "02x") for c in raw_data])
return prefix_string + suffix.upper()
|
ChristianKuehnel/btlewrap
|
btlewrap/gatttool.py
|
GatttoolBackend.extract_notification_payload
|
python
|
def extract_notification_payload(process_output):
data = []
for element in process_output.splitlines()[1:]:
parts = element.split(": ")
if len(parts) == 2:
data.append(parts[1])
return data
|
Processes the raw output from Gatttool stripping the first line and the
'Notification handle = 0x000e value: ' from each line
@param: process_output - the raw output from a listen commad of GattTool
which may look like this:
Characteristic value was written successfully
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 30 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 32 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 33 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 31 20 48 3d 32 37 2e 34 00
This method strips the fist line and strips the 'Notification handle = 0x000e value: ' from each line
@returns a processed string only containing the values.
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/gatttool.py#L179-L202
| null |
class GatttoolBackend(AbstractBackend):
""" Backend using gatttool."""
# pylint: disable=subprocess-popen-preexec-fn
def __init__(self, adapter: str = 'hci0', retries: int = 3, timeout: float = 20, address_type: str = 'public'):
super(GatttoolBackend, self).__init__(adapter)
self.adapter = adapter
self.retries = retries
self.timeout = timeout
self.address_type = address_type
self._mac = None
def connect(self, mac: str):
"""Connect to sensor.
Connection handling is not required when using gatttool, but we still need the mac
"""
self._mac = mac
def disconnect(self):
"""Disconnect from sensor.
Connection handling is not required when using gatttool.
"""
self._mac = None
def is_connected(self) -> bool:
"""Check if we are connected to the backend."""
return self._mac is not None
@wrap_exception
def write_handle(self, handle: int, value: bytes):
# noqa: C901
# pylint: disable=arguments-differ
"""Read from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
@param: value - value to write to the given handle
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter write_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={}".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.bytes_to_string(value), self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s",
self.timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=self.timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Killed hanging gatttool")
result = result.decode("utf-8").strip(' \n\t')
if "Write Request failed" in result:
raise BluetoothBackendException('Error writing handle to sensor: {}'.format(result))
_LOGGER.debug("Got %s from gatttool", result)
# Parse the output
if "successfully" in result:
_LOGGER.debug(
"Exit write_ble with result (%s)", current_thread())
return True
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
@wrap_exception
def wait_for_notification(self, handle: int, delegate, notification_timeout: float):
"""Listen for characteristics changes from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
a value of 0x0100 is written to register for listening
@param: delegate - gatttool receives the
--listen argument and the delegate object's handleNotification is
called for every returned row
@param: notification_timeout
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter write_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={} --listen".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.bytes_to_string(self._DATA_MODE_LISTEN),
self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s", notification_timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=notification_timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group, because listening always hangs
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Listening stopped forcefully after timeout.")
result = result.decode("utf-8").strip(' \n\t')
if "Write Request failed" in result:
raise BluetoothBackendException('Error writing handle to sensor: {}'.format(result))
_LOGGER.debug("Got %s from gatttool", result)
# Parse the output to determine success
if "successfully" in result:
_LOGGER.debug("Exit write_ble with result (%s)", current_thread())
# extract useful data.
for element in self.extract_notification_payload(result):
delegate.handleNotification(handle, bytes([int(x, 16) for x in element.split()]))
return True
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
@staticmethod
@wrap_exception
def read_handle(self, handle: int) -> bytes:
"""Read from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
@param: timeout - timeout in seconds
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter read_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-read -a {} --adapter={}".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s",
self.timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=self.timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Killed hanging gatttool")
result = result.decode("utf-8").strip(' \n\t')
_LOGGER.debug("Got \"%s\" from gatttool", result)
# Parse the output
if "read failed" in result:
raise BluetoothBackendException("Read error from gatttool: {}".format(result))
res = re.search("( [0-9a-fA-F][0-9a-fA-F])+", result)
if res:
_LOGGER.debug(
"Exit read_ble with result (%s)", current_thread())
return bytes([int(x, 16) for x in res.group(0).split()])
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit read_ble, no data ({})".format(current_thread()))
@staticmethod
def check_backend() -> bool:
"""Check if gatttool is available on the system."""
try:
call('gatttool', stdout=PIPE, stderr=PIPE)
return True
except OSError as os_err:
msg = 'gatttool not found: {}'.format(str(os_err))
_LOGGER.error(msg)
return False
@staticmethod
def byte_to_handle(in_byte: int) -> str:
"""Convert a byte array to a handle string."""
return '0x'+'{:02x}'.format(in_byte).upper()
@staticmethod
def bytes_to_string(raw_data: bytes, prefix: bool = False) -> str:
"""Convert a byte array to a hex string."""
prefix_string = ''
if prefix:
prefix_string = '0x'
suffix = ''.join([format(c, "02x") for c in raw_data])
return prefix_string + suffix.upper()
|
ChristianKuehnel/btlewrap
|
btlewrap/gatttool.py
|
GatttoolBackend.check_backend
|
python
|
def check_backend() -> bool:
try:
call('gatttool', stdout=PIPE, stderr=PIPE)
return True
except OSError as os_err:
msg = 'gatttool not found: {}'.format(str(os_err))
_LOGGER.error(msg)
return False
|
Check if gatttool is available on the system.
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/gatttool.py#L260-L268
| null |
class GatttoolBackend(AbstractBackend):
""" Backend using gatttool."""
# pylint: disable=subprocess-popen-preexec-fn
def __init__(self, adapter: str = 'hci0', retries: int = 3, timeout: float = 20, address_type: str = 'public'):
super(GatttoolBackend, self).__init__(adapter)
self.adapter = adapter
self.retries = retries
self.timeout = timeout
self.address_type = address_type
self._mac = None
def connect(self, mac: str):
"""Connect to sensor.
Connection handling is not required when using gatttool, but we still need the mac
"""
self._mac = mac
def disconnect(self):
"""Disconnect from sensor.
Connection handling is not required when using gatttool.
"""
self._mac = None
def is_connected(self) -> bool:
"""Check if we are connected to the backend."""
return self._mac is not None
@wrap_exception
def write_handle(self, handle: int, value: bytes):
# noqa: C901
# pylint: disable=arguments-differ
"""Read from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
@param: value - value to write to the given handle
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter write_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={}".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.bytes_to_string(value), self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s",
self.timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=self.timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Killed hanging gatttool")
result = result.decode("utf-8").strip(' \n\t')
if "Write Request failed" in result:
raise BluetoothBackendException('Error writing handle to sensor: {}'.format(result))
_LOGGER.debug("Got %s from gatttool", result)
# Parse the output
if "successfully" in result:
_LOGGER.debug(
"Exit write_ble with result (%s)", current_thread())
return True
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
@wrap_exception
def wait_for_notification(self, handle: int, delegate, notification_timeout: float):
"""Listen for characteristics changes from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
a value of 0x0100 is written to register for listening
@param: delegate - gatttool receives the
--listen argument and the delegate object's handleNotification is
called for every returned row
@param: notification_timeout
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter write_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={} --listen".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.bytes_to_string(self._DATA_MODE_LISTEN),
self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s", notification_timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=notification_timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group, because listening always hangs
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Listening stopped forcefully after timeout.")
result = result.decode("utf-8").strip(' \n\t')
if "Write Request failed" in result:
raise BluetoothBackendException('Error writing handle to sensor: {}'.format(result))
_LOGGER.debug("Got %s from gatttool", result)
# Parse the output to determine success
if "successfully" in result:
_LOGGER.debug("Exit write_ble with result (%s)", current_thread())
# extract useful data.
for element in self.extract_notification_payload(result):
delegate.handleNotification(handle, bytes([int(x, 16) for x in element.split()]))
return True
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
@staticmethod
def extract_notification_payload(process_output):
"""
Processes the raw output from Gatttool stripping the first line and the
'Notification handle = 0x000e value: ' from each line
@param: process_output - the raw output from a listen commad of GattTool
which may look like this:
Characteristic value was written successfully
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 30 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 32 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 33 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 31 20 48 3d 32 37 2e 34 00
This method strips the fist line and strips the 'Notification handle = 0x000e value: ' from each line
@returns a processed string only containing the values.
"""
data = []
for element in process_output.splitlines()[1:]:
parts = element.split(": ")
if len(parts) == 2:
data.append(parts[1])
return data
@wrap_exception
def read_handle(self, handle: int) -> bytes:
"""Read from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
@param: timeout - timeout in seconds
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter read_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-read -a {} --adapter={}".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s",
self.timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=self.timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Killed hanging gatttool")
result = result.decode("utf-8").strip(' \n\t')
_LOGGER.debug("Got \"%s\" from gatttool", result)
# Parse the output
if "read failed" in result:
raise BluetoothBackendException("Read error from gatttool: {}".format(result))
res = re.search("( [0-9a-fA-F][0-9a-fA-F])+", result)
if res:
_LOGGER.debug(
"Exit read_ble with result (%s)", current_thread())
return bytes([int(x, 16) for x in res.group(0).split()])
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit read_ble, no data ({})".format(current_thread()))
@staticmethod
@staticmethod
def byte_to_handle(in_byte: int) -> str:
"""Convert a byte array to a handle string."""
return '0x'+'{:02x}'.format(in_byte).upper()
@staticmethod
def bytes_to_string(raw_data: bytes, prefix: bool = False) -> str:
"""Convert a byte array to a hex string."""
prefix_string = ''
if prefix:
prefix_string = '0x'
suffix = ''.join([format(c, "02x") for c in raw_data])
return prefix_string + suffix.upper()
|
ChristianKuehnel/btlewrap
|
btlewrap/gatttool.py
|
GatttoolBackend.bytes_to_string
|
python
|
def bytes_to_string(raw_data: bytes, prefix: bool = False) -> str:
prefix_string = ''
if prefix:
prefix_string = '0x'
suffix = ''.join([format(c, "02x") for c in raw_data])
return prefix_string + suffix.upper()
|
Convert a byte array to a hex string.
|
train
|
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/gatttool.py#L276-L282
| null |
class GatttoolBackend(AbstractBackend):
""" Backend using gatttool."""
# pylint: disable=subprocess-popen-preexec-fn
def __init__(self, adapter: str = 'hci0', retries: int = 3, timeout: float = 20, address_type: str = 'public'):
super(GatttoolBackend, self).__init__(adapter)
self.adapter = adapter
self.retries = retries
self.timeout = timeout
self.address_type = address_type
self._mac = None
def connect(self, mac: str):
"""Connect to sensor.
Connection handling is not required when using gatttool, but we still need the mac
"""
self._mac = mac
def disconnect(self):
"""Disconnect from sensor.
Connection handling is not required when using gatttool.
"""
self._mac = None
def is_connected(self) -> bool:
"""Check if we are connected to the backend."""
return self._mac is not None
@wrap_exception
def write_handle(self, handle: int, value: bytes):
# noqa: C901
# pylint: disable=arguments-differ
"""Read from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
@param: value - value to write to the given handle
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter write_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={}".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.bytes_to_string(value), self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s",
self.timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=self.timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Killed hanging gatttool")
result = result.decode("utf-8").strip(' \n\t')
if "Write Request failed" in result:
raise BluetoothBackendException('Error writing handle to sensor: {}'.format(result))
_LOGGER.debug("Got %s from gatttool", result)
# Parse the output
if "successfully" in result:
_LOGGER.debug(
"Exit write_ble with result (%s)", current_thread())
return True
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
@wrap_exception
def wait_for_notification(self, handle: int, delegate, notification_timeout: float):
"""Listen for characteristics changes from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
a value of 0x0100 is written to register for listening
@param: delegate - gatttool receives the
--listen argument and the delegate object's handleNotification is
called for every returned row
@param: notification_timeout
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter write_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={} --listen".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.bytes_to_string(self._DATA_MODE_LISTEN),
self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s", notification_timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=notification_timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group, because listening always hangs
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Listening stopped forcefully after timeout.")
result = result.decode("utf-8").strip(' \n\t')
if "Write Request failed" in result:
raise BluetoothBackendException('Error writing handle to sensor: {}'.format(result))
_LOGGER.debug("Got %s from gatttool", result)
# Parse the output to determine success
if "successfully" in result:
_LOGGER.debug("Exit write_ble with result (%s)", current_thread())
# extract useful data.
for element in self.extract_notification_payload(result):
delegate.handleNotification(handle, bytes([int(x, 16) for x in element.split()]))
return True
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
@staticmethod
def extract_notification_payload(process_output):
"""
Processes the raw output from Gatttool stripping the first line and the
'Notification handle = 0x000e value: ' from each line
@param: process_output - the raw output from a listen commad of GattTool
which may look like this:
Characteristic value was written successfully
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 30 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 32 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 33 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 31 20 48 3d 32 37 2e 34 00
This method strips the fist line and strips the 'Notification handle = 0x000e value: ' from each line
@returns a processed string only containing the values.
"""
data = []
for element in process_output.splitlines()[1:]:
parts = element.split(": ")
if len(parts) == 2:
data.append(parts[1])
return data
@wrap_exception
def read_handle(self, handle: int) -> bytes:
"""Read from a BLE address.
@param: mac - MAC address in format XX:XX:XX:XX:XX:XX
@param: handle - BLE characteristics handle in format 0xXX
@param: timeout - timeout in seconds
"""
if not self.is_connected():
raise BluetoothBackendException('Not connected to any device.')
attempt = 0
delay = 10
_LOGGER.debug("Enter read_ble (%s)", current_thread())
while attempt <= self.retries:
cmd = "gatttool --device={} --addr-type={} --char-read -a {} --adapter={}".format(
self._mac, self.address_type, self.byte_to_handle(handle), self.adapter)
_LOGGER.debug("Running gatttool with a timeout of %d: %s",
self.timeout, cmd)
with Popen(cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid) as process:
try:
result = process.communicate(timeout=self.timeout)[0]
_LOGGER.debug("Finished gatttool")
except TimeoutExpired:
# send signal to the process group
os.killpg(process.pid, signal.SIGINT)
result = process.communicate()[0]
_LOGGER.debug("Killed hanging gatttool")
result = result.decode("utf-8").strip(' \n\t')
_LOGGER.debug("Got \"%s\" from gatttool", result)
# Parse the output
if "read failed" in result:
raise BluetoothBackendException("Read error from gatttool: {}".format(result))
res = re.search("( [0-9a-fA-F][0-9a-fA-F])+", result)
if res:
_LOGGER.debug(
"Exit read_ble with result (%s)", current_thread())
return bytes([int(x, 16) for x in res.group(0).split()])
attempt += 1
_LOGGER.debug("Waiting for %s seconds before retrying", delay)
if attempt < self.retries:
time.sleep(delay)
delay *= 2
raise BluetoothBackendException("Exit read_ble, no data ({})".format(current_thread()))
@staticmethod
def check_backend() -> bool:
"""Check if gatttool is available on the system."""
try:
call('gatttool', stdout=PIPE, stderr=PIPE)
return True
except OSError as os_err:
msg = 'gatttool not found: {}'.format(str(os_err))
_LOGGER.error(msg)
return False
@staticmethod
def byte_to_handle(in_byte: int) -> str:
"""Convert a byte array to a handle string."""
return '0x'+'{:02x}'.format(in_byte).upper()
@staticmethod
|
google/google-visualization-python
|
gviz_api.py
|
DataTable.CoerceValue
|
python
|
def CoerceValue(value, value_type):
if isinstance(value, tuple):
# In case of a tuple, we run the same function on the value itself and
# add the formatted value.
if (len(value) not in [2, 3] or
(len(value) == 3 and not isinstance(value[2], dict))):
raise DataTableException("Wrong format for value and formatting - %s." %
str(value))
if not isinstance(value[1], six.string_types + (type(None),)):
raise DataTableException("Formatted value is not string, given %s." %
type(value[1]))
js_value = DataTable.CoerceValue(value[0], value_type)
return (js_value,) + value[1:]
t_value = type(value)
if value is None:
return value
if value_type == "boolean":
return bool(value)
elif value_type == "number":
if isinstance(value, six.integer_types + (float,)):
return value
raise DataTableException("Wrong type %s when expected number" % t_value)
elif value_type == "string":
if isinstance(value, six.text_type):
return value
if isinstance(value, bytes):
return six.text_type(value, encoding="utf-8")
else:
return six.text_type(value)
elif value_type == "date":
if isinstance(value, datetime.datetime):
return datetime.date(value.year, value.month, value.day)
elif isinstance(value, datetime.date):
return value
else:
raise DataTableException("Wrong type %s when expected date" % t_value)
elif value_type == "timeofday":
if isinstance(value, datetime.datetime):
return datetime.time(value.hour, value.minute, value.second)
elif isinstance(value, datetime.time):
return value
else:
raise DataTableException("Wrong type %s when expected time" % t_value)
elif value_type == "datetime":
if isinstance(value, datetime.datetime):
return value
else:
raise DataTableException("Wrong type %s when expected datetime" %
t_value)
# If we got here, it means the given value_type was not one of the
# supported types.
raise DataTableException("Unsupported type %s" % value_type)
|
Coerces a single value into the type expected for its column.
Internal helper method.
Args:
value: The value which should be converted
value_type: One of "string", "number", "boolean", "date", "datetime" or
"timeofday".
Returns:
An item of the Python type appropriate to the given value_type. Strings
are also converted to Unicode using UTF-8 encoding if necessary.
If a tuple is given, it should be in one of the following forms:
- (value, formatted value)
- (value, formatted value, custom properties)
where the formatted value is a string, and custom properties is a
dictionary of the custom properties for this cell.
To specify custom properties without specifying formatted value, one can
pass None as the formatted value.
One can also have a null-valued cell with formatted value and/or custom
properties by specifying None for the value.
This method ignores the custom properties except for checking that it is a
dictionary. The custom properties are handled in the ToJSon and ToJSCode
methods.
The real type of the given value is not strictly checked. For example,
any type can be used for string - as we simply take its str( ) and for
boolean value we just check "if value".
Examples:
CoerceValue(None, "string") returns None
CoerceValue((5, "5$"), "number") returns (5, "5$")
CoerceValue(100, "string") returns "100"
CoerceValue(0, "boolean") returns False
Raises:
DataTableException: The value and type did not match in a not-recoverable
way, for example given value 'abc' for type 'number'.
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L176-L270
|
[
"def CoerceValue(value, value_type):\n \"\"\"Coerces a single value into the type expected for its column.\n\n Internal helper method.\n\n Args:\n value: The value which should be converted\n value_type: One of \"string\", \"number\", \"boolean\", \"date\", \"datetime\" or\n \"timeofday\".\n\n Returns:\n An item of the Python type appropriate to the given value_type. Strings\n are also converted to Unicode using UTF-8 encoding if necessary.\n If a tuple is given, it should be in one of the following forms:\n - (value, formatted value)\n - (value, formatted value, custom properties)\n where the formatted value is a string, and custom properties is a\n dictionary of the custom properties for this cell.\n To specify custom properties without specifying formatted value, one can\n pass None as the formatted value.\n One can also have a null-valued cell with formatted value and/or custom\n properties by specifying None for the value.\n This method ignores the custom properties except for checking that it is a\n dictionary. The custom properties are handled in the ToJSon and ToJSCode\n methods.\n The real type of the given value is not strictly checked. 
For example,\n any type can be used for string - as we simply take its str( ) and for\n boolean value we just check \"if value\".\n Examples:\n CoerceValue(None, \"string\") returns None\n CoerceValue((5, \"5$\"), \"number\") returns (5, \"5$\")\n CoerceValue(100, \"string\") returns \"100\"\n CoerceValue(0, \"boolean\") returns False\n\n Raises:\n DataTableException: The value and type did not match in a not-recoverable\n way, for example given value 'abc' for type 'number'.\n \"\"\"\n if isinstance(value, tuple):\n # In case of a tuple, we run the same function on the value itself and\n # add the formatted value.\n if (len(value) not in [2, 3] or\n (len(value) == 3 and not isinstance(value[2], dict))):\n raise DataTableException(\"Wrong format for value and formatting - %s.\" %\n str(value))\n if not isinstance(value[1], six.string_types + (type(None),)):\n raise DataTableException(\"Formatted value is not string, given %s.\" %\n type(value[1]))\n js_value = DataTable.CoerceValue(value[0], value_type)\n return (js_value,) + value[1:]\n\n t_value = type(value)\n if value is None:\n return value\n if value_type == \"boolean\":\n return bool(value)\n\n elif value_type == \"number\":\n if isinstance(value, six.integer_types + (float,)):\n return value\n raise DataTableException(\"Wrong type %s when expected number\" % t_value)\n\n elif value_type == \"string\":\n if isinstance(value, six.text_type):\n return value\n if isinstance(value, bytes):\n return six.text_type(value, encoding=\"utf-8\")\n else:\n return six.text_type(value)\n\n elif value_type == \"date\":\n if isinstance(value, datetime.datetime):\n return datetime.date(value.year, value.month, value.day)\n elif isinstance(value, datetime.date):\n return value\n else:\n raise DataTableException(\"Wrong type %s when expected date\" % t_value)\n\n elif value_type == \"timeofday\":\n if isinstance(value, datetime.datetime):\n return datetime.time(value.hour, value.minute, value.second)\n elif isinstance(value, 
datetime.time):\n return value\n else:\n raise DataTableException(\"Wrong type %s when expected time\" % t_value)\n\n elif value_type == \"datetime\":\n if isinstance(value, datetime.datetime):\n return value\n else:\n raise DataTableException(\"Wrong type %s when expected datetime\" %\n t_value)\n # If we got here, it means the given value_type was not one of the\n # supported types.\n raise DataTableException(\"Unsupported type %s\" % value_type)\n"
] |
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
    """Builds a DataTable from a schema and, optionally, initial data.

    See the class documentation for more information on table schema and
    data values.

    Args:
      table_description: The table schema, in any of the formats accepted
          by TableDescriptionParser() (column ids, data types and labels).
      data: Optional initial rows, consistent with the schema. More rows
          can be added later with AppendData().
      custom_properties: Optional string-to-string dictionary stored as the
          table's custom properties; may be changed later by assigning to
          self.custom_properties.

    Raises:
      DataTableException: The description is malformed, or the data does
          not match it.
    """
    self.__columns = self.TableDescriptionParser(table_description)
    self.__data = []
    # Keep the caller's dictionary when one was supplied; otherwise start
    # with a fresh empty one.
    self.custom_properties = (
        {} if custom_properties is None else custom_properties)
    if data:
        self.LoadData(data)
@staticmethod
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
    """Returns a plain-text representation of a single cell value.

    None renders as "(empty)"; date/time values use their str() form;
    bytes are decoded as UTF-8; booleans become "true"/"false"; any other
    value is converted with six.text_type().
    """
    if value is None:
        return "(empty)"
    elif isinstance(value, (datetime.datetime,
                            datetime.date,
                            datetime.time)):
        return str(value)
    elif isinstance(value, six.text_type):
        return value
    elif isinstance(value, bool):
        # bool must be tested before the generic fallback so True/False
        # become JS-style "true"/"false" rather than "True"/"False".
        return str(value).lower()
    elif isinstance(value, bytes):
        return six.text_type(value, encoding="utf-8")
    else:
        return six.text_type(value)
@staticmethod
def ColumnTypeParser(description):
    """Parses a single column description. Internal helper method.

    Args:
      description: a column description in the possible formats:
       'id'
       ('id',)
       ('id', 'type')
       ('id', 'type', 'label')
       ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
    Returns:
      Dictionary with the following keys: id, label, type, and
      custom_properties where:
        - If label not given, it equals the id.
        - If type not given, string is used by default.
        - If custom properties are not given, an empty dictionary is used by
          default.

    Raises:
      DataTableException: The column description did not match the RE, or
          unsupported type was passed.
    """
    if not description:
        raise DataTableException("Description error: empty description given")

    if not isinstance(description, (six.string_types, tuple)):
        raise DataTableException("Description error: expected either string or "
                                 "tuple, got %s." % type(description))

    # Normalize the bare-string form to a one-element tuple.
    if isinstance(description, six.string_types):
        description = (description,)

    # According to the tuple's length, we fill the keys
    # We verify everything is of type string
    for elem in description[:3]:
        if not isinstance(elem, six.string_types):
            raise DataTableException("Description error: expected tuple of "
                                     "strings, current element of type %s." %
                                     type(elem))
    # Defaults: label mirrors id, type is "string", no custom properties.
    desc_dict = {"id": description[0],
                 "label": description[0],
                 "type": "string",
                 "custom_properties": {}}
    if len(description) > 1:
        desc_dict["type"] = description[1].lower()
        if len(description) > 2:
            desc_dict["label"] = description[2]
            if len(description) > 3:
                if not isinstance(description[3], dict):
                    raise DataTableException("Description error: expected custom "
                                             "properties of type dict, current element "
                                             "of type %s." % type(description[3]))
                desc_dict["custom_properties"] = description[3]
                if len(description) > 4:
                    raise DataTableException("Description error: tuple of length > 4")
    if desc_dict["type"] not in ["string", "number", "boolean",
                                 "date", "datetime", "timeofday"]:
        raise DataTableException(
            "Description error: unsupported type '%s'" % desc_dict["type"])
    return desc_dict
@staticmethod
def TableDescriptionParser(table_description, depth=0):
    """Parses the table_description object for internal use.

    Parses the user-submitted table description into an internal format used
    by the Python DataTable class. Returns the flat list of parsed columns.

    Args:
      table_description: A description of the table which should comply
          with one of the formats described below.
      depth: Optional. The depth of the first level in the current
          description. Used by recursive calls to this function.

    Returns:
      List of columns, where each column represented by a dictionary with the
      keys: id, label, type, depth, container which means the following:
      - id: the id of the column
      - name: The name of the column
      - type: The datatype of the elements in this column. Allowed types are
        described in ColumnTypeParser().
      - depth: The depth of this column in the table description
      - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
      - custom_properties: The custom properties for this column.
      The returned description is flattened regardless of how it was given.

    Raises:
      DataTableException: Error in a column description or in the description
          structure.

    Examples:
      A column description can be of the following forms:
       'id'
       ('id',)
       ('id', 'type')
       ('id', 'type', 'label')
       ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
       or as a dictionary:
       'id': 'type'
       'id': ('type',)
       'id': ('type', 'label')
       'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
      If the type is not specified, we treat it as string.
      If no specific label is given, the label is simply the id.
      If no custom properties are given, we use an empty dictionary.

      input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
      output: [{'id': 'a', 'label': 'a', 'type': 'date',
                'depth': 0, 'container': 'iter', 'custom_properties': {}},
               {'id': 'b', 'label': 'b', 'type': 'timeofday',
                'depth': 0, 'container': 'iter',
                'custom_properties': {'foo': 'bar'}}]

      input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
      output: [{'id': 'a', 'label': 'a', 'type': 'string',
                'depth': 0, 'container': 'dict', 'custom_properties': {}},
               {'id': 'b', 'label': 'b', 'type': 'number',
                'depth': 1, 'container': 'iter', 'custom_properties': {}},
               {'id': 'c', 'label': 'column c', 'type': 'string',
                'depth': 1, 'container': 'iter', 'custom_properties': {}}]

      input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
      output: [{'id': 'a', 'label': 'column a', 'type': 'number',
                'depth': 0, 'container': 'dict', 'custom_properties': {}},
               {'id': 'b', 'label': 'b', 'type': 'number',
                'depth': 1, 'container': 'dict', 'custom_properties': {}},
               {'id': 'c', 'label': 'c', 'type': 'string',
                'depth': 1, 'container': 'dict', 'custom_properties': {}}]

      input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
      output: [{'id': 'w', 'label': 'word', 'type': 'string',
                'depth': 0, 'container': 'dict', 'custom_properties': {}},
               {'id': 'c', 'label': 'count', 'type': 'number',
                'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

      input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
      output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
                'container': 'dict', 'custom_properties': {}},
               {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
                'container': 'dict', 'custom_properties': {}}

      NOTE: there might be ambiguity in the case of a dictionary representation
      of a single column. For example, the following description can be parsed
      in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
      with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
      'a', and the other named 'b' of type 'c'. We choose the first option by
      default, and in case the second option is the right one, it is possible to
      make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
      into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
      -- second 'b' is the label, and {} is the custom properties field.
    """
    # For the recursion step, we check for a scalar object (string or tuple)
    if isinstance(table_description, (six.string_types, tuple)):
        parsed_col = DataTable.ColumnTypeParser(table_description)
        parsed_col["depth"] = depth
        parsed_col["container"] = "scalar"
        return [parsed_col]

    # Since it is not scalar, table_description must be iterable.
    if not hasattr(table_description, "__iter__"):
        raise DataTableException("Expected an iterable object, got %s" %
                                 type(table_description))
    if not isinstance(table_description, dict):
        # We expects a non-dictionary iterable item.
        columns = []
        for desc in table_description:
            parsed_col = DataTable.ColumnTypeParser(desc)
            parsed_col["depth"] = depth
            parsed_col["container"] = "iter"
            columns.append(parsed_col)
        if not columns:
            raise DataTableException("Description iterable objects should not"
                                     " be empty.")
        return columns
    # The other case is a dictionary
    if not table_description:
        raise DataTableException("Empty dictionaries are not allowed inside"
                                 " description")

    # To differentiate between the two cases of more levels below or this is
    # the most inner dictionary, we consider the number of keys (more then one
    # key is indication for most inner dictionary) and the type of the key and
    # value in case of only 1 key (if the type of key is string and the type of
    # the value is a tuple of 0-3 items, we assume this is the most inner
    # dictionary).
    # NOTE: this way of differentiating might create ambiguity. See docs.
    if (len(table_description) != 1 or
        (isinstance(next(six.iterkeys(table_description)), six.string_types) and
         isinstance(next(six.itervalues(table_description)), tuple) and
         len(next(six.itervalues(table_description))) < 4)):
        # This is the most inner dictionary. Parsing types.
        columns = []
        # We sort the items, equivalent to sort the keys since they are unique
        for key, value in sorted(table_description.items()):
            # We parse the column type as (key, type) or (key, type, label) using
            # ColumnTypeParser.
            if isinstance(value, tuple):
                parsed_col = DataTable.ColumnTypeParser((key,) + value)
            else:
                parsed_col = DataTable.ColumnTypeParser((key, value))
            parsed_col["depth"] = depth
            parsed_col["container"] = "dict"
            columns.append(parsed_col)
        return columns
    # This is an outer dictionary, must have at most one key.
    parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
    parsed_col["depth"] = depth
    parsed_col["container"] = "dict"
    # Recurse into the single value; its columns sit one level deeper.
    return ([parsed_col] + DataTable.TableDescriptionParser(
        sorted(table_description.values())[0], depth=depth + 1))
@property
def columns(self):
    """The flat list of parsed column descriptions (see TableDescriptionParser)."""
    return self.__columns
def NumberOfRows(self):
    """Returns the number of rows currently stored in the table."""
    return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
"""Sets the custom properties for given row(s).
Can accept a single row or an iterable of rows.
Sets the given custom properties for all specified rows.
Args:
rows: The row, or rows, to set the custom properties for.
custom_properties: A string to string dictionary of custom properties to
set for all rows.
"""
if not hasattr(rows, "__iter__"):
rows = [rows]
for row in rows:
self.__data[row] = (self.__data[row][0], custom_properties)
def LoadData(self, data, custom_properties=None):
    """Loads new rows to the data table, clearing existing rows.

    May also set the custom_properties for the added rows. The given custom
    properties dictionary specifies the dictionary that will be used for *all*
    given rows.

    Args:
      data: The rows that the table will contain.
      custom_properties: A dictionary of string to string to set as the custom
                         properties for all rows.
    """
    # Rebind to a fresh list so all previously appended rows are dropped.
    self.__data = []
    self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
"""Appends new data to the table.
Data is appended in rows. Data must comply with
the table schema passed in to __init__(). See CoerceValue() for a list
of acceptable data types. See the class documentation for more information
and examples of schema and data values.
Args:
data: The row to add to the table. The data must conform to the table
description format.
custom_properties: A dictionary of string to string, representing the
custom properties to add to all the rows.
Raises:
DataTableException: The data structure does not match the description.
"""
# If the maximal depth is 0, we simply iterate over the data table
# lines and insert them using _InnerAppendData. Otherwise, we simply
# let the _InnerAppendData handle all the levels.
if not self.__columns[-1]["depth"]:
for row in data:
self._InnerAppendData(({}, custom_properties), row, 0)
else:
self._InnerAppendData(({}, custom_properties), data, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
    """Inner function to assist LoadData.

    Recursively walks `data` according to the parsed column descriptions,
    accumulating cell values into prev_col_values and appending completed
    rows to self.__data.

    Args:
      prev_col_values: Tuple of (dict mapping column id -> value collected
          so far, custom properties for the row).
      data: The remaining (possibly nested) user data for this row.
      col_index: Index into self.__columns of the next column to fill.

    Raises:
      DataTableException: The data does not match the table description.
    """
    # We first check that col_index has not exceeded the columns size
    if col_index >= len(self.__columns):
        raise DataTableException("The data does not match description, too deep")

    # Dealing with the scalar case, the data is the last value.
    if self.__columns[col_index]["container"] == "scalar":
        prev_col_values[0][self.__columns[col_index]["id"]] = data
        self.__data.append(prev_col_values)
        return

    if self.__columns[col_index]["container"] == "iter":
        if not hasattr(data, "__iter__") or isinstance(data, dict):
            raise DataTableException("Expected iterable object, got %s" %
                                     type(data))
        # We only need to insert the rest of the columns
        # If there are less items than expected, we only add what there is.
        for value in data:
            if col_index >= len(self.__columns):
                raise DataTableException("Too many elements given in data")
            prev_col_values[0][self.__columns[col_index]["id"]] = value
            col_index += 1
        self.__data.append(prev_col_values)
        return

    # We know the current level is a dictionary, we verify the type.
    if not isinstance(data, dict):
        raise DataTableException("Expected dictionary at current level, got %s" %
                                 type(data))
    # We check if this is the last level
    if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
        # We need to add the keys in the dictionary as they are
        for col in self.__columns[col_index:]:
            if col["id"] in data:
                prev_col_values[0][col["id"]] = data[col["id"]]
        self.__data.append(prev_col_values)
        return

    # We have a dictionary in an inner depth level.
    if not data.keys():
        # In case this is an empty dictionary, we add a record with the columns
        # filled only until this point.
        self.__data.append(prev_col_values)
    else:
        # One output row per key; each branch copies the values collected so
        # far so sibling keys do not share (and mutate) the same dict.
        for key in sorted(data):
            col_values = dict(prev_col_values[0])
            col_values[self.__columns[col_index]["id"]] = key
            self._InnerAppendData((col_values, prev_col_values[1]),
                                  data[key], col_index + 1)
def _PreparedData(self, order_by=()):
    """Prepares the data for enumeration - sorting it by order_by.

    Args:
      order_by: Optional. Specifies the name of the column(s) to sort by, and
                (optionally) which direction to sort in. Default sort direction
                is asc. Following formats are accepted:
                "string_col_name"  -- For a single key in default (asc) order.
                ("string_col_name", "asc|desc") -- For a single key.
                [("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
                    one column, an array of tuples of (col_name, "asc|desc").

    Returns:
      The data sorted by the keys given.

    Raises:
      DataTableException: Sort direction not in 'asc' or 'desc'
    """
    if not order_by:
        # No sorting requested: return the stored rows as-is (not a copy).
        return self.__data

    sorted_data = self.__data[:]
    # Normalize the two single-key shorthands to a one-element sequence.
    if isinstance(order_by, six.string_types) or (
        isinstance(order_by, tuple) and len(order_by) == 2 and
        order_by[1].lower() in ["asc", "desc"]):
        order_by = (order_by,)
    # Python's sort is stable, so sorting by each key from least- to
    # most-significant yields the combined multi-key ordering.
    # NOTE(review): rows missing the key sort with a None key, which raises
    # TypeError against non-None values on Python 3 -- confirm callers
    # always populate sorted columns.
    for key in reversed(order_by):
        if isinstance(key, six.string_types):
            sorted_data.sort(key=lambda x: x[0].get(key))
        elif (isinstance(key, (list, tuple)) and len(key) == 2 and
              key[1].lower() in ("asc", "desc")):
            # The lambda closes over the loop variable `key`, which is safe
            # here because sort() consumes it before the next iteration.
            key_func = lambda x: x[0].get(key[0])
            sorted_data.sort(key=key_func, reverse=key[1].lower() != "asc")
        else:
            raise DataTableException("Expected tuple with second value: "
                                     "'asc' or 'desc'")
    return sorted_data
def ToJSCode(self, name, columns_order=None, order_by=()):
    """Writes the data table as a JS code string.

    This method writes a string of JS code that can be run to
    generate a DataTable with the specified data. Typically used for debugging
    only.

    Args:
      name: The name of the table. The name would be used as the DataTable's
            variable name in the created JS code.
      columns_order: Optional. Specifies the order of columns in the
                     output table. Specify a list of all column IDs in the
                     order in which you want the table created.
                     Note that you must list all column IDs in this parameter,
                     if you use it.
      order_by: Optional. Specifies the name of the column(s) to sort by.
                Passed as is to _PreparedData.

    Returns:
      A string of JS code that, when run, generates a DataTable with the given
      name and the data stored in the DataTable object.

    Example result:
      "var tab1 = new google.visualization.DataTable();
       tab1.addColumn("string", "a", "a");
       tab1.addColumn("number", "b", "b");
       tab1.addColumn("boolean", "c", "c");
       tab1.addRows(10);
       tab1.setCell(0, 0, "a");
       tab1.setCell(0, 1, 1, null, {"foo": "bar"});
       tab1.setCell(0, 2, true);
       ...
       tab1.setCell(9, 0, "c");
       tab1.setCell(9, 1, 3, "3$");
       tab1.setCell(9, 2, false);"

    Raises:
      DataTableException: The data does not match the type.
    """
    encoder = DataTableJSONEncoder()

    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    col_dict = dict([(col["id"], col) for col in self.__columns])

    # We first create the table with the given name
    jscode = "var %s = new google.visualization.DataTable();\n" % name
    if self.custom_properties:
        jscode += "%s.setTableProperties(%s);\n" % (
            name, encoder.encode(self.custom_properties))

    # We add the columns to the table
    for i, col in enumerate(columns_order):
        jscode += "%s.addColumn(%s, %s, %s);\n" % (
            name,
            encoder.encode(col_dict[col]["type"]),
            encoder.encode(col_dict[col]["label"]),
            encoder.encode(col_dict[col]["id"]))
        if col_dict[col]["custom_properties"]:
            jscode += "%s.setColumnProperties(%d, %s);\n" % (
                name, i, encoder.encode(col_dict[col]["custom_properties"]))
    jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

    # We now go over the data and add each row
    for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
        # We add all the elements of this row by their order
        for (j, col) in enumerate(columns_order):
            # Cells holding None (or missing entirely) stay as JS nulls,
            # which addRows() already produced.
            if col not in row or row[col] is None:
                continue
            value = self.CoerceValue(row[col], col_dict[col]["type"])
            if isinstance(value, tuple):
                cell_cp = ""
                if len(value) == 3:
                    cell_cp = ", %s" % encoder.encode(row[col][2])
                # We have a formatted value or custom property as well
                jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                           (name, i, j,
                            self.EscapeForJSCode(encoder, value[0]),
                            self.EscapeForJSCode(encoder, value[1]), cell_cp))
            else:
                jscode += "%s.setCell(%d, %d, %s);\n" % (
                    name, i, j, self.EscapeForJSCode(encoder, value))
        if cp:
            jscode += "%s.setRowProperties(%d, %s);\n" % (
                name, i, encoder.encode(cp))
    return jscode
def ToHtml(self, columns_order=None, order_by=()):
    """Writes the data table as an HTML table code string.

    Args:
      columns_order: Optional. Specifies the order of columns in the
                     output table. Specify a list of all column IDs in the
                     order in which you want the table created.
                     Note that you must list all column IDs in this parameter,
                     if you use it.
      order_by: Optional. Specifies the name of the column(s) to sort by.
                Passed as is to _PreparedData.

    Returns:
      An HTML table code string.
      Example result (the result is without the newlines):
       <html><body><table border="1">
        <thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
        <tbody>
         <tr><td>1</td><td>"z"</td><td>2</td></tr>
         <tr><td>"3$"</td><td>"w"</td><td></td></tr>
        </tbody>
       </table></body></html>

    Raises:
      DataTableException: The data does not match the type.
    """
    table_template = "<html><body><table border=\"1\">%s</table></body></html>"
    columns_template = "<thead><tr>%s</tr></thead>"
    rows_template = "<tbody>%s</tbody>"
    row_template = "<tr>%s</tr>"
    header_cell_template = "<th>%s</th>"
    cell_template = "<td>%s</td>"

    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    col_dict = dict([(col["id"], col) for col in self.__columns])

    # Labels are HTML-escaped to keep the markup well-formed.
    columns_list = []
    for col in columns_order:
        columns_list.append(header_cell_template %
                            html.escape(col_dict[col]["label"]))
    columns_html = columns_template % "".join(columns_list)

    rows_list = []
    # We now go over the data and add each row
    for row, unused_cp in self._PreparedData(order_by):
        cells_list = []
        # We add all the elements of this row by their order
        for col in columns_order:
            # For empty string we want empty quotes ("").
            value = ""
            if col in row and row[col] is not None:
                value = self.CoerceValue(row[col], col_dict[col]["type"])
            if isinstance(value, tuple):
                # We have a formatted value and we're going to use it
                cells_list.append(cell_template % html.escape(self.ToString(value[1])))
            else:
                cells_list.append(cell_template % html.escape(self.ToString(value)))
        rows_list.append(row_template % "".join(cells_list))
    rows_html = rows_template % "".join(rows_list)

    return table_template % (columns_html + rows_html)
def ToCsv(self, columns_order=None, order_by=(), separator=","):
    """Writes the data table as a CSV string.

    Output is encoded in UTF-8 because the Python "csv" module can't handle
    Unicode properly according to its documentation.

    Args:
      columns_order: Optional. Specifies the order of columns in the
                     output table. Specify a list of all column IDs in the
                     order in which you want the table created.
                     Note that you must list all column IDs in this parameter,
                     if you use it.
      order_by: Optional. Specifies the name of the column(s) to sort by.
                Passed as is to _PreparedData.
      separator: Optional. The separator to use between the values.

    Returns:
      A CSV string representing the table.
      Example result:
       'a','b','c'
       1,'z',2
       3,'w',''

    Raises:
      DataTableException: The data does not match the type.
    """
    csv_buffer = six.StringIO()
    writer = csv.writer(csv_buffer, delimiter=separator)

    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    col_dict = dict([(col["id"], col) for col in self.__columns])

    def ensure_str(s):
        "Compatibility function. Ensures using of str rather than unicode."
        if isinstance(s, str):
            return s
        return s.encode("utf-8")

    writer.writerow([ensure_str(col_dict[col]["label"])
                     for col in columns_order])

    # We now go over the data and add each row
    for row, unused_cp in self._PreparedData(order_by):
        cells_list = []
        # We add all the elements of this row by their order
        for col in columns_order:
            value = ""
            if col in row and row[col] is not None:
                value = self.CoerceValue(row[col], col_dict[col]["type"])
            if isinstance(value, tuple):
                # We have a formatted value. Using it only for date/time types.
                if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
                    cells_list.append(ensure_str(self.ToString(value[1])))
                else:
                    cells_list.append(ensure_str(self.ToString(value[0])))
            else:
                cells_list.append(ensure_str(self.ToString(value)))
        writer.writerow(cells_list)
    # NOTE: under Python 3 six.StringIO is io.StringIO, so this is a str.
    return csv_buffer.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
    """Returns the table as UTF-16 little-endian tab-separated values.

    This is the format MS Excel expects for a tab-separated import.

    Args:
      columns_order: Delegated to ToCsv.
      order_by: Delegated to ToCsv.

    Returns:
      A UTF-16LE-encoded, tab-separated bytes rendering of the table.
    """
    tsv = self.ToCsv(columns_order, order_by, separator="\t")
    # ToCsv may hand back encoded bytes (Python 2 path); normalize to text
    # before re-encoding for Excel.
    if isinstance(tsv, bytes):
        tsv = tsv.decode("utf-8")
    return tsv.encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
    """Returns an object suitable to be converted to JSON.

    Args:
      columns_order: Optional. A list of all column IDs in the order in which
                     you want them created in the output table. If specified,
                     all column IDs must be present.
      order_by: Optional. Specifies the name of the column(s) to sort by.
                Passed as is to _PreparedData().

    Returns:
      A dictionary object for use by ToJSon or ToJSonResponse.
    """
    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    col_dict = dict([(col["id"], col) for col in self.__columns])

    # Creating the column JSON objects
    col_objs = []
    for col_id in columns_order:
        col_obj = {"id": col_dict[col_id]["id"],
                   "label": col_dict[col_id]["label"],
                   "type": col_dict[col_id]["type"]}
        if col_dict[col_id]["custom_properties"]:
            col_obj["p"] = col_dict[col_id]["custom_properties"]
        col_objs.append(col_obj)

    # Creating the rows jsons
    row_objs = []
    for row, cp in self._PreparedData(order_by):
        cell_objs = []
        for col in columns_order:
            value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
            if value is None:
                cell_obj = None
            elif isinstance(value, tuple):
                # "v" = value, "f" = formatted value, "p" = cell properties,
                # matching the gviz DataTable JSON cell layout.
                cell_obj = {"v": value[0]}
                if len(value) > 1 and value[1] is not None:
                    cell_obj["f"] = value[1]
                if len(value) == 3:
                    cell_obj["p"] = value[2]
            else:
                cell_obj = {"v": value}
            cell_objs.append(cell_obj)
        row_obj = {"c": cell_objs}
        if cp:
            row_obj["p"] = cp
        row_objs.append(row_obj)

    json_obj = {"cols": col_objs, "rows": row_objs}
    if self.custom_properties:
        json_obj["p"] = self.custom_properties

    return json_obj
def ToJSon(self, columns_order=None, order_by=()):
    """Returns a string that can be used in a JS DataTable constructor.

    This method writes a JSON string that can be passed directly into a Google
    Visualization API DataTable constructor. Use this output if you are
    hosting the visualization HTML on your site, and want to code the data
    table in Python. Pass this string into the
    google.visualization.DataTable constructor, e.g,:
      ... on my page that hosts my visualization ...
      google.setOnLoadCallback(drawTable);
      function drawTable() {
        var data = new google.visualization.DataTable(_my_JSon_string, 0.6);
        myTable.draw(data);
      }

    Args:
      columns_order: Optional. Specifies the order of columns in the
                     output table. Specify a list of all column IDs in the
                     order in which you want the table created.
                     Note that you must list all column IDs in this parameter,
                     if you use it.
      order_by: Optional. Specifies the name of the column(s) to sort by.
                Passed as is to _PreparedData().

    Returns:
      A JSon constructor string to generate a JS DataTable with the data
      stored in the DataTable object.
      Example result (the result is without the newlines):
       {cols: [{id:"a",label:"a",type:"number"},
               {id:"b",label:"b",type:"string"},
              {id:"c",label:"c",type:"number"}],
        rows: [{c:[{v:1},{v:"z"},{v:2}]}, c:{[{v:3,f:"3$"},{v:"w"},null]}],
        p:    {'foo': 'bar'}}

    Raises:
      DataTableException: The data does not match the type.
    """
    encoded_response_str = DataTableJSONEncoder().encode(self._ToJSonObj(columns_order, order_by))
    # Under Python 2 the encoder may return unicode; normalize to the native
    # str type expected by callers.
    if not isinstance(encoded_response_str, str):
        return encoded_response_str.encode("utf-8")
    return encoded_response_str
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
    """Writes a table as a JSON response that can be returned as-is to a client.

    This method writes a JSON response to return to a client in response to a
    Google Visualization API query. This string can be processed by the calling
    page, and is used to deliver a data table to a visualization hosted on
    a different page.

    Args:
      columns_order: Optional. Passed straight to self.ToJSon().
      order_by: Optional. Passed straight to self.ToJSon().
      req_id: Optional. The response id, as retrieved by the request.
      response_handler: Optional. The response handler, as retrieved by the
          request.

    Returns:
      A JSON response string to be received by JS the visualization Query
      object. This response would be translated into a DataTable on the
      client side.
      Example result (newlines added for readability):
       google.visualization.Query.setResponse({
          'version':'0.6', 'reqId':'0', 'status':'ok',
          'table': {cols: [...], rows: [...]}});

    Note: The URL returning this string can be used as a data source by Google
          Visualization Gadgets or from JS code.
    """
    response_obj = {
        "version": "0.6",
        "reqId": str(req_id),
        "table": self._ToJSonObj(columns_order, order_by),
        "status": "ok"
    }
    encoded_response_str = DataTableJSONEncoder().encode(response_obj)
    # Under Python 2 the encoder may return unicode; normalize before
    # interpolating into the callback wrapper.
    if not isinstance(encoded_response_str, str):
        encoded_response_str = encoded_response_str.encode("utf-8")
    return "%s(%s);" % (response_handler, encoded_response_str)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
    """Dispatches to the right output format based on the tqx request string.

    Parses the tqx parameter (the request format defined by the Google
    Visualization data-source protocol, "key1:value1;key2:value2...") and
    delegates to ToJSonResponse() for "json" (the default), ToHtml() for
    "html", ToCsv() for "csv", or ToTsvExcel() for "tsv-excel".

    Args:
      columns_order: Optional. Forwarded unchanged to the chosen renderer.
      order_by: Optional. Forwarded unchanged to the chosen renderer.
      tqx: Optional. The raw tqx request string; empty means all defaults.

    Returns:
      The response string produced by the selected renderer.

    Raises:
      DataTableException: An unsupported 'version' or 'out' value was given.
    """
    options = {}
    if tqx:
        options = dict(part.split(":") for part in tqx.split(";"))
    # Only protocol version 0.6 is implemented.
    version = options.get("version", "0.6")
    if version != "0.6":
        raise DataTableException(
            "Version (%s) passed by request is not supported."
            % version)
    out = options.get("out", "json")
    if out == "json":
        handler = options.get("responseHandler",
                              "google.visualization.Query.setResponse")
        return self.ToJSonResponse(columns_order, order_by,
                                   req_id=options.get("reqId", 0),
                                   response_handler=handler)
    if out == "html":
        return self.ToHtml(columns_order, order_by)
    if out == "csv":
        return self.ToCsv(columns_order, order_by)
    if out == "tsv-excel":
        return self.ToTsvExcel(columns_order, order_by)
    raise DataTableException(
        "'out' parameter: '%s' is not supported" % out)
|
google/google-visualization-python
|
gviz_api.py
|
DataTable.ColumnTypeParser
|
python
|
def ColumnTypeParser(description):
    """Parses a single column description. Internal helper method.

    Accepts either a bare id string or a tuple of up to four elements:
    ('id'[, 'type'[, 'label'[, {custom properties dict}]]]).

    Returns:
      Dict with keys id, label, type and custom_properties, where label
      defaults to the id, type defaults to "string" and custom_properties
      defaults to an empty dict.

    Raises:
      DataTableException: The description is empty, malformed, or names an
          unsupported column type.
    """
    if not description:
        raise DataTableException("Description error: empty description given")
    if not isinstance(description, (six.string_types, tuple)):
        raise DataTableException("Description error: expected either string or "
                                 "tuple, got %s." % type(description))
    # A bare string is shorthand for a one-element tuple.
    if isinstance(description, six.string_types):
        description = (description,)
    # id, type and label (when present) must all be strings.
    for part in description[:3]:
        if not isinstance(part, six.string_types):
            raise DataTableException("Description error: expected tuple of "
                                     "strings, current element of type %s." %
                                     type(part))
    parsed = {"id": description[0],
              "label": description[0],
              "type": "string",
              "custom_properties": {}}
    if len(description) > 1:
        parsed["type"] = description[1].lower()
    if len(description) > 2:
        parsed["label"] = description[2]
    if len(description) > 3:
        if not isinstance(description[3], dict):
            raise DataTableException("Description error: expected custom "
                                     "properties of type dict, current element "
                                     "of type %s." % type(description[3]))
        parsed["custom_properties"] = description[3]
    if len(description) > 4:
        raise DataTableException("Description error: tuple of length > 4")
    if parsed["type"] not in ("string", "number", "boolean",
                              "date", "datetime", "timeofday"):
        raise DataTableException(
            "Description error: unsupported type '%s'" % parsed["type"])
    return parsed
|
Parses a single column description. Internal helper method.
Args:
description: a column description in the possible formats:
'id'
('id',)
('id', 'type')
('id', 'type', 'label')
('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
Returns:
Dictionary with the following keys: id, label, type, and
custom_properties where:
- If label not given, it equals the id.
- If type not given, string is used by default.
- If custom properties are not given, an empty dictionary is used by
default.
Raises:
DataTableException: The column description did not match the expected
format, or an unsupported type was passed.
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L316-L375
| null |
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
    """Initialize the data table from a table schema and (optionally) data.

    See the class documentation for more information on table schema and data
    values.

    Args:
      table_description: A table schema, following one of the formats described
                         in TableDescriptionParser(). Schemas describe the
                         column names, data types, and labels. See
                         TableDescriptionParser() for acceptable formats.
      data: Optional. If given, fills the table with the given data. The data
            structure must be consistent with schema in table_description. See
            the class documentation for more information on acceptable data. You
            can add data later by calling AppendData().
      custom_properties: Optional. A dictionary from string to string that
                         goes into the table's custom properties. This can be
                         later changed by changing self.custom_properties.

    Raises:
      DataTableException: Raised if the data and the description did not match,
                          or did not use the supported formats.
    """
    # Parse the schema once; all rows appended later are validated against
    # this flattened column list.
    self.__columns = self.TableDescriptionParser(table_description)
    # Rows are stored as (row_values_dict, custom_properties_or_None) pairs.
    self.__data = []
    self.custom_properties = {}
    if custom_properties is not None:
        self.custom_properties = custom_properties
    if data:
        self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
"""Coerces a single value into the type expected for its column.
Internal helper method.
Args:
value: The value which should be converted
value_type: One of "string", "number", "boolean", "date", "datetime" or
"timeofday".
Returns:
An item of the Python type appropriate to the given value_type. Strings
are also converted to Unicode using UTF-8 encoding if necessary.
If a tuple is given, it should be in one of the following forms:
- (value, formatted value)
- (value, formatted value, custom properties)
where the formatted value is a string, and custom properties is a
dictionary of the custom properties for this cell.
To specify custom properties without specifying formatted value, one can
pass None as the formatted value.
One can also have a null-valued cell with formatted value and/or custom
properties by specifying None for the value.
This method ignores the custom properties except for checking that it is a
dictionary. The custom properties are handled in the ToJSon and ToJSCode
methods.
The real type of the given value is not strictly checked. For example,
any type can be used for string - as we simply take its str( ) and for
boolean value we just check "if value".
Examples:
CoerceValue(None, "string") returns None
CoerceValue((5, "5$"), "number") returns (5, "5$")
CoerceValue(100, "string") returns "100"
CoerceValue(0, "boolean") returns False
Raises:
DataTableException: The value and type did not match in a not-recoverable
way, for example given value 'abc' for type 'number'.
"""
if isinstance(value, tuple):
# In case of a tuple, we run the same function on the value itself and
# add the formatted value.
if (len(value) not in [2, 3] or
(len(value) == 3 and not isinstance(value[2], dict))):
raise DataTableException("Wrong format for value and formatting - %s." %
str(value))
if not isinstance(value[1], six.string_types + (type(None),)):
raise DataTableException("Formatted value is not string, given %s." %
type(value[1]))
js_value = DataTable.CoerceValue(value[0], value_type)
return (js_value,) + value[1:]
t_value = type(value)
if value is None:
return value
if value_type == "boolean":
return bool(value)
elif value_type == "number":
if isinstance(value, six.integer_types + (float,)):
return value
raise DataTableException("Wrong type %s when expected number" % t_value)
elif value_type == "string":
if isinstance(value, six.text_type):
return value
if isinstance(value, bytes):
return six.text_type(value, encoding="utf-8")
else:
return six.text_type(value)
elif value_type == "date":
if isinstance(value, datetime.datetime):
return datetime.date(value.year, value.month, value.day)
elif isinstance(value, datetime.date):
return value
else:
raise DataTableException("Wrong type %s when expected date" % t_value)
elif value_type == "timeofday":
if isinstance(value, datetime.datetime):
return datetime.time(value.hour, value.minute, value.second)
elif isinstance(value, datetime.time):
return value
else:
raise DataTableException("Wrong type %s when expected time" % t_value)
elif value_type == "datetime":
if isinstance(value, datetime.datetime):
return value
else:
raise DataTableException("Wrong type %s when expected datetime" %
t_value)
# If we got here, it means the given value_type was not one of the
# supported types.
raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
if value is None:
return "(empty)"
elif isinstance(value, (datetime.datetime,
datetime.date,
datetime.time)):
return str(value)
elif isinstance(value, six.text_type):
return value
elif isinstance(value, bool):
return str(value).lower()
elif isinstance(value, bytes):
return six.text_type(value, encoding="utf-8")
else:
return six.text_type(value)
@staticmethod
def TableDescriptionParser(table_description, depth=0):
    """Parses the table_description object for internal use.

    Parses the user-submitted table description into an internal format used
    by the Python DataTable class. Returns the flat list of parsed columns.

    Args:
      table_description: A description of the table which should comply
                         with one of the formats described below.
      depth: Optional. The depth of the first level in the current description.
             Used by recursive calls to this function.

    Returns:
      List of columns, where each column represented by a dictionary with the
      keys: id, label, type, depth, container which means the following:
      - id: the id of the column
      - name: The name of the column
      - type: The datatype of the elements in this column. Allowed types are
              described in ColumnTypeParser().
      - depth: The depth of this column in the table description
      - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
      - custom_properties: The custom properties for this column.
      The returned description is flattened regardless of how it was given.

    Raises:
      DataTableException: Error in a column description or in the description
                          structure.

    Examples:
      A column description can be of the following forms:
       'id'
       ('id',)
       ('id', 'type')
       ('id', 'type', 'label')
       ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
      or as a dictionary:
       'id': 'type'
       'id': ('type',)
       'id': ('type', 'label')
       'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
      If the type is not specified, we treat it as string.
      If no specific label is given, the label is simply the id.
      If no custom properties are given, we use an empty dictionary.

      input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
      output: [{'id': 'a', 'label': 'a', 'type': 'date',
                'depth': 0, 'container': 'iter', 'custom_properties': {}},
               {'id': 'b', 'label': 'b', 'type': 'timeofday',
                'depth': 0, 'container': 'iter',
                'custom_properties': {'foo': 'bar'}}]

      input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
      output: [{'id': 'a', 'label': 'a', 'type': 'string',
                'depth': 0, 'container': 'dict', 'custom_properties': {}},
               {'id': 'b', 'label': 'b', 'type': 'number',
                'depth': 1, 'container': 'iter', 'custom_properties': {}},
               {'id': 'c', 'label': 'column c', 'type': 'string',
                'depth': 1, 'container': 'iter', 'custom_properties': {}}]

      input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
      output: [{'id': 'a', 'label': 'column a', 'type': 'number',
                'depth': 0, 'container': 'dict', 'custom_properties': {}},
               {'id': 'b', 'label': 'b', 'type': 'number',
                'depth': 1, 'container': 'dict', 'custom_properties': {}},
               {'id': 'c', 'label': 'c', 'type': 'string',
                'depth': 1, 'container': 'dict', 'custom_properties': {}}]

      input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
      output: [{'id': 'w', 'label': 'word', 'type': 'string',
                'depth': 0, 'container': 'dict', 'custom_properties': {}},
               {'id': 'c', 'label': 'count', 'type': 'number',
                'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

      input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
      output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
               'container': 'dict', 'custom_properties': {}},
               {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
               'container': 'dict', 'custom_properties': {}}

    NOTE: there might be ambiguity in the case of a dictionary representation
    of a single column. For example, the following description can be parsed
    in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
    with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
    'a', and the other named 'b' of type 'c'. We choose the first option by
    default, and in case the second option is the right one, it is possible to
    make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
    into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
    -- second 'b' is the label, and {} is the custom properties field.
    """
    # For the recursion step, we check for a scalar object (string or tuple)
    if isinstance(table_description, (six.string_types, tuple)):
        parsed_col = DataTable.ColumnTypeParser(table_description)
        parsed_col["depth"] = depth
        parsed_col["container"] = "scalar"
        return [parsed_col]

    # Since it is not scalar, table_description must be iterable.
    if not hasattr(table_description, "__iter__"):
        raise DataTableException("Expected an iterable object, got %s" %
                                 type(table_description))
    if not isinstance(table_description, dict):
        # We expect a non-dictionary iterable item.
        columns = []
        for desc in table_description:
            parsed_col = DataTable.ColumnTypeParser(desc)
            parsed_col["depth"] = depth
            parsed_col["container"] = "iter"
            columns.append(parsed_col)
        if not columns:
            raise DataTableException("Description iterable objects should not"
                                     " be empty.")
        return columns
    # The other case is a dictionary
    if not table_description:
        raise DataTableException("Empty dictionaries are not allowed inside"
                                 " description")

    # To differentiate between the two cases of more levels below or this is
    # the most inner dictionary, we consider the number of keys (more then one
    # key is indication for most inner dictionary) and the type of the key and
    # value in case of only 1 key (if the type of key is string and the type of
    # the value is a tuple of 0-3 items, we assume this is the most inner
    # dictionary).
    # NOTE: this way of differentiating might create ambiguity. See docs.
    if (len(table_description) != 1 or
        (isinstance(next(six.iterkeys(table_description)), six.string_types) and
         isinstance(next(six.itervalues(table_description)), tuple) and
         len(next(six.itervalues(table_description))) < 4)):
        # This is the most inner dictionary. Parsing types.
        columns = []
        # We sort the items, equivalent to sort the keys since they are unique
        for key, value in sorted(table_description.items()):
            # We parse the column type as (key, type) or (key, type, label)
            # using ColumnTypeParser.
            if isinstance(value, tuple):
                parsed_col = DataTable.ColumnTypeParser((key,) + value)
            else:
                parsed_col = DataTable.ColumnTypeParser((key, value))
            parsed_col["depth"] = depth
            parsed_col["container"] = "dict"
            columns.append(parsed_col)
        return columns
    # This is an outer dictionary, must have at most one key.
    parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
    parsed_col["depth"] = depth
    parsed_col["container"] = "dict"
    return ([parsed_col] + DataTable.TableDescriptionParser(
        sorted(table_description.values())[0], depth=depth + 1))
@property
def columns(self):
    """The parsed, flattened column descriptions (read-only).

    Each entry is a dict as produced by TableDescriptionParser(), with the
    keys id, label, type, depth, container and custom_properties.
    """
    return self.__columns
def NumberOfRows(self):
    """Returns the number of rows in the current data stored in the table."""
    # Each stored row is a (row_values, custom_properties) pair.
    return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
    """Sets the custom properties for the given row index(es).

    Accepts either one row index or an iterable of indexes; the same
    custom_properties dictionary is attached to every row listed.

    Args:
      rows: A row index, or an iterable of row indexes, to update.
      custom_properties: A string-to-string dictionary of custom properties
          to set for all the specified rows.
    """
    # Normalize a single index into a one-element list.
    if not hasattr(rows, "__iter__"):
        rows = [rows]
    for index in rows:
        # Rows are (values, custom_properties) pairs; keep the values and
        # swap in the new properties.
        values = self.__data[index][0]
        self.__data[index] = (values, custom_properties)
def LoadData(self, data, custom_properties=None):
    """Loads new rows to the data table, clearing existing rows.

    May also set the custom_properties for the added rows. The given custom
    properties dictionary specifies the dictionary that will be used for *all*
    given rows.

    Args:
      data: The rows that the table will contain.
      custom_properties: A dictionary of string to string to set as the custom
                         properties for all rows.
    """
    # Drop every existing row, then delegate validation and insertion to
    # AppendData().
    self.__data = []
    self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
    """Appends new rows to the table without clearing the existing ones.

    The data must conform to the table schema passed to __init__(); see
    CoerceValue() for acceptable cell values and the class documentation
    for examples of schema and data shapes.

    Args:
      data: The data to add, in the same nested shape as the table
          description.
      custom_properties: A string-to-string dictionary used as the custom
          properties for every appended row.

    Raises:
      DataTableException: The data structure does not match the description.
    """
    max_depth = self.__columns[-1]["depth"]
    if max_depth:
        # Nested schema: _InnerAppendData recurses through all the levels.
        self._InnerAppendData(({}, custom_properties), data, 0)
    else:
        # Flat schema (depth 0): every element of `data` is one row.
        # A fresh dict is seeded per row because _InnerAppendData mutates it.
        for row in data:
            self._InnerAppendData(({}, custom_properties), row, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
    """Inner function to assist LoadData.

    Recursively walks `data` along the parsed column structure, filling the
    partial row dict and appending completed rows to self.__data.

    Args:
      prev_col_values: Tuple of (partial row dict keyed by column id,
          custom properties for the row being built). The dict is mutated.
      data: The (possibly nested) data for the columns from col_index on.
      col_index: Index into self.__columns of the next column to fill.

    Raises:
      DataTableException: The data does not match the table description.
    """
    # We first check that col_index has not exceeded the columns size
    if col_index >= len(self.__columns):
        raise DataTableException("The data does not match description, too deep")

    # Dealing with the scalar case, the data is the last value.
    if self.__columns[col_index]["container"] == "scalar":
        prev_col_values[0][self.__columns[col_index]["id"]] = data
        self.__data.append(prev_col_values)
        return

    if self.__columns[col_index]["container"] == "iter":
        # dicts are explicitly rejected here: a dict at an 'iter' level is a
        # structural mismatch even though it is iterable.
        if not hasattr(data, "__iter__") or isinstance(data, dict):
            raise DataTableException("Expected iterable object, got %s" %
                                     type(data))
        # We only need to insert the rest of the columns
        # If there are less items than expected, we only add what there is.
        for value in data:
            if col_index >= len(self.__columns):
                raise DataTableException("Too many elements given in data")
            prev_col_values[0][self.__columns[col_index]["id"]] = value
            col_index += 1
        self.__data.append(prev_col_values)
        return

    # We know the current level is a dictionary, we verify the type.
    if not isinstance(data, dict):
        raise DataTableException("Expected dictionary at current level, got %s" %
                                 type(data))
    # We check if this is the last level
    if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
        # We need to add the keys in the dictionary as they are
        for col in self.__columns[col_index:]:
            if col["id"] in data:
                prev_col_values[0][col["id"]] = data[col["id"]]
        self.__data.append(prev_col_values)
        return

    # We have a dictionary in an inner depth level.
    if not data.keys():
        # In case this is an empty dictionary, we add a record with the columns
        # filled only until this point.
        self.__data.append(prev_col_values)
    else:
        # Each key becomes this column's value; recurse into the mapped value
        # for the remaining columns, branching one row per key. The partial
        # row dict is copied so sibling branches do not share state.
        for key in sorted(data):
            col_values = dict(prev_col_values[0])
            col_values[self.__columns[col_index]["id"]] = key
            self._InnerAppendData((col_values, prev_col_values[1]),
                                  data[key], col_index + 1)
def _PreparedData(self, order_by=()):
    """Prepares the data for enumeration - sorting it by order_by.

    Args:
      order_by: Optional. Specifies the name of the column(s) to sort by, and
                (optionally) which direction to sort in. Default sort direction
                is asc. Following formats are accepted:
                "string_col_name"  -- For a single key in default (asc) order.
                ("string_col_name", "asc|desc") -- For a single key.
                [("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
                    one column, an array of tuples of (col_name, "asc|desc").

    Returns:
      The data sorted by the keys given.

    Raises:
      DataTableException: Sort direction not in 'asc' or 'desc'
    """
    if not order_by:
        return self.__data

    # Sort a shallow copy so the stored row order stays untouched.
    sorted_data = self.__data[:]
    # Normalize the single-key forms ("col" or ("col", "asc")) into a
    # one-element tuple of sort keys.
    if isinstance(order_by, six.string_types) or (
        isinstance(order_by, tuple) and len(order_by) == 2 and
        order_by[1].lower() in ["asc", "desc"]):
        order_by = (order_by,)
    # A stable sort applied from the least-significant key backwards yields
    # the combined multi-key ordering.
    for key in reversed(order_by):
        if isinstance(key, six.string_types):
            # NOTE: `key` is captured from the loop, but sort() consumes the
            # lambda immediately within this iteration, so this is safe.
            sorted_data.sort(key=lambda x: x[0].get(key))
        elif (isinstance(key, (list, tuple)) and len(key) == 2 and
              key[1].lower() in ("asc", "desc")):
            key_func = lambda x: x[0].get(key[0])
            sorted_data.sort(key=key_func, reverse=key[1].lower() != "asc")
        else:
            raise DataTableException("Expected tuple with second value: "
                                     "'asc' or 'desc'")
    return sorted_data
def ToJSCode(self, name, columns_order=None, order_by=()):
    """Writes the data table as a JS code string.

    This method writes a string of JS code that can be run to
    generate a DataTable with the specified data. Typically used for debugging
    only.

    Args:
      name: The name of the table. The name would be used as the DataTable's
            variable name in the created JS code.
      columns_order: Optional. Specifies the order of columns in the
                     output table. Specify a list of all column IDs in the
                     order in which you want the table created.
                     Note that you must list all column IDs in this parameter,
                     if you use it.
      order_by: Optional. Specifies the name of the column(s) to sort by.
                Passed as is to _PreparedData.

    Returns:
      A string of JS code that, when run, generates a DataTable with the given
      name and the data stored in the DataTable object.

    Example result:
      "var tab1 = new google.visualization.DataTable();
       tab1.addColumn("string", "a", "a");
       tab1.addColumn("number", "b", "b");
       tab1.addColumn("boolean", "c", "c");
       tab1.addRows(10);
       tab1.setCell(0, 0, "a");
       tab1.setCell(0, 1, 1, null, {"foo": "bar"});
       tab1.setCell(0, 2, true);
       ...
       tab1.setCell(9, 0, "c");
       tab1.setCell(9, 1, 3, "3$");
       tab1.setCell(9, 2, false);"

    Raises:
      DataTableException: The data does not match the type.
    """
    encoder = DataTableJSONEncoder()

    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    col_dict = dict([(col["id"], col) for col in self.__columns])

    # We first create the table with the given name
    jscode = "var %s = new google.visualization.DataTable();\n" % name
    if self.custom_properties:
        jscode += "%s.setTableProperties(%s);\n" % (
            name, encoder.encode(self.custom_properties))

    # We add the columns to the table
    for i, col in enumerate(columns_order):
        jscode += "%s.addColumn(%s, %s, %s);\n" % (
            name,
            encoder.encode(col_dict[col]["type"]),
            encoder.encode(col_dict[col]["label"]),
            encoder.encode(col_dict[col]["id"]))
        if col_dict[col]["custom_properties"]:
            jscode += "%s.setColumnProperties(%d, %s);\n" % (
                name, i, encoder.encode(col_dict[col]["custom_properties"]))
    jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

    # We now go over the data and add each row
    for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
        # We add all the elements of this row by their order
        for (j, col) in enumerate(columns_order):
            # Missing or None cells are simply left unset in the JS table.
            if col not in row or row[col] is None:
                continue
            value = self.CoerceValue(row[col], col_dict[col]["type"])
            if isinstance(value, tuple):
                cell_cp = ""
                if len(value) == 3:
                    # row[col][2] is the same object as value[2]: the cell's
                    # custom properties dict (CoerceValue keeps value[1:]).
                    cell_cp = ", %s" % encoder.encode(row[col][2])
                # We have a formatted value or custom property as well
                jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                           (name, i, j,
                            self.EscapeForJSCode(encoder, value[0]),
                            self.EscapeForJSCode(encoder, value[1]), cell_cp))
            else:
                jscode += "%s.setCell(%d, %d, %s);\n" % (
                    name, i, j, self.EscapeForJSCode(encoder, value))
        if cp:
            jscode += "%s.setRowProperties(%d, %s);\n" % (
                name, i, encoder.encode(cp))
    return jscode
def ToHtml(self, columns_order=None, order_by=()):
    """Renders the table as a minimal standalone HTML page.

    Args:
      columns_order: Optional. A list of all column IDs in the order in
          which they should appear; if given, every column ID must be listed.
      order_by: Optional. Column(s) to sort by; passed as is to
          _PreparedData.

    Returns:
      A string of the form
      '<html><body><table border="1">...</table></body></html>' with one
      <th> per column label and one <tr> of <td> cells per row. Cells with
      a formatted value display the formatted text.

    Raises:
      DataTableException: The data does not match the type.
    """
    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    col_dict = dict((col["id"], col) for col in self.__columns)

    # Header row: one <th> per (escaped) column label.
    header_cells = "".join(
        "<th>%s</th>" % html.escape(col_dict[col]["label"])
        for col in columns_order)
    head_html = "<thead><tr>%s</tr></thead>" % header_cells

    body_rows = []
    for row, _ in self._PreparedData(order_by):
        cells = []
        for col in columns_order:
            # Absent / null cells render as an empty string.
            value = ""
            if col in row and row[col] is not None:
                value = self.CoerceValue(row[col], col_dict[col]["type"])
            if isinstance(value, tuple):
                # (value, formatted) pair: show the formatted text.
                text = self.ToString(value[1])
            else:
                text = self.ToString(value)
            cells.append("<td>%s</td>" % html.escape(text))
        body_rows.append("<tr>%s</tr>" % "".join(cells))
    body_html = "<tbody>%s</tbody>" % "".join(body_rows)

    return ("<html><body><table border=\"1\">%s</table></body></html>"
            % (head_html + body_html))
def ToCsv(self, columns_order=None, order_by=(), separator=","):
    """Renders the table as a CSV string.

    Output is encoded in UTF-8 because the Python "csv" module can't handle
    Unicode properly according to its documentation.

    Args:
      columns_order: Optional. A list of all column IDs in the order in
          which they should appear; if given, every column ID must be listed.
      order_by: Optional. Column(s) to sort by; passed as is to
          _PreparedData.
      separator: Optional. The delimiter placed between values.

    Returns:
      A CSV string: one header row of column labels followed by one row per
      table row. Date-like cells with a formatted value render the formatted
      text; other formatted cells render the raw value.

    Raises:
      DataTableException: The data does not match the type.
    """
    output = six.StringIO()
    writer = csv.writer(output, delimiter=separator)

    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    col_dict = dict((col["id"], col) for col in self.__columns)

    def as_str(text):
        "Compatibility shim: returns str, encoding unicode as UTF-8 (py2)."
        if isinstance(text, str):
            return text
        return text.encode("utf-8")

    writer.writerow([as_str(col_dict[col]["label"])
                     for col in columns_order])

    for row, _ in self._PreparedData(order_by):
        cells = []
        for col in columns_order:
            # Absent / null cells render as an empty string.
            value = ""
            if col in row and row[col] is not None:
                value = self.CoerceValue(row[col], col_dict[col]["type"])
            if isinstance(value, tuple):
                if col_dict[col]["type"] in ("date", "datetime", "timeofday"):
                    # Only date-like cells use their formatted value.
                    cells.append(as_str(self.ToString(value[1])))
                else:
                    cells.append(as_str(self.ToString(value[0])))
            else:
                cells.append(as_str(self.ToString(value)))
        writer.writerow(cells)
    return output.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
    """Returns the table as UTF-16-LE tab-separated text readable by MS Excel.

    Args:
      columns_order: Delegated to ToCsv.
      order_by: Delegated to ToCsv.

    Returns:
      A tab-separated little endian UTF16 file representing the table.
    """
    tsv = self.ToCsv(columns_order, order_by, separator="\t")
    if not isinstance(tsv, six.text_type):
        # Python 2 ToCsv returns UTF-8 bytes; normalize to text before
        # re-encoding.
        tsv = tsv.decode("utf-8")
    return tsv.encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
  """Returns an object suitable to be converted to JSON.

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
                   you want them created in the output table. If specified,
                   all column IDs must be present.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData().

  Returns:
    A dictionary object for use by ToJSon or ToJSonResponse.
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Creating the column JSON objects
  col_objs = []
  for col_id in columns_order:
    col_obj = {"id": col_dict[col_id]["id"],
               "label": col_dict[col_id]["label"],
               "type": col_dict[col_id]["type"]}
    if col_dict[col_id]["custom_properties"]:
      col_obj["p"] = col_dict[col_id]["custom_properties"]
    col_objs.append(col_obj)

  # Creating the rows jsons
  row_objs = []
  for row, cp in self._PreparedData(order_by):
    cell_objs = []
    for col in columns_order:
      value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
      if value is None:
        cell_obj = None
      elif isinstance(value, tuple):
        # CoerceValue returned (value, formatted[, custom properties]):
        # "v" carries the raw value, "f" the formatted string (skipped when
        # None), and "p" the per-cell custom properties.
        cell_obj = {"v": value[0]}
        if len(value) > 1 and value[1] is not None:
          cell_obj["f"] = value[1]
        if len(value) == 3:
          cell_obj["p"] = value[2]
      else:
        cell_obj = {"v": value}
      cell_objs.append(cell_obj)
    row_obj = {"c": cell_objs}
    if cp:
      row_obj["p"] = cp
    row_objs.append(row_obj)

  json_obj = {"cols": col_objs, "rows": row_objs}
  # Table-level custom properties ride along under "p" when present.
  if self.custom_properties:
    json_obj["p"] = self.custom_properties

  return json_obj
def ToJSon(self, columns_order=None, order_by=()):
  """Returns a string that can be used in a JS DataTable constructor.

  Serializes the table (via _ToJSonObj) with DataTableJSONEncoder so the
  result can be passed straight into the google.visualization.DataTable
  constructor, e.g.:

    var data = new google.visualization.DataTable(_my_JSon_string, 0.6);

  Args:
    columns_order: Optional. Specifies the order of columns in the output
                   table. Specify a list of all column IDs in the order in
                   which you want the table created. Note that you must
                   list all column IDs in this parameter, if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData().

  Returns:
    A JSon constructor string to generate a JS DataTable with the data
    stored in the DataTable object.

  Raises:
    DataTableException: The data does not match the type.
  """
  json_obj = self._ToJSonObj(columns_order, order_by)
  encoded = DataTableJSONEncoder().encode(json_obj)
  # Under Python 2 the encoder yields unicode; hand back UTF-8 bytes there.
  if isinstance(encoded, str):
    return encoded
  return encoded.encode("utf-8")
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
  """Writes a table as a JSON response that can be returned as-is to a client.

  Builds the Google Visualization API query response wrapper around the
  serialized table, e.g. (newlines added for readability):

    google.visualization.Query.setResponse({
        'version':'0.6', 'reqId':'0', 'status':'ok',
        'table': {cols: [...], rows: [...]}});

  Note: The URL returning this string can be used as a data source by Google
  Visualization Gadgets or from JS code.

  Args:
    columns_order: Optional. Passed straight to self.ToJSon().
    order_by: Optional. Passed straight to self.ToJSon().
    req_id: Optional. The response id, as retrieved by the request.
    response_handler: Optional. The response handler, as retrieved by the
                      request.

  Returns:
    A JSON response string to be received by the JS visualization Query
    object and turned into a DataTable on the client side.
  """
  payload = {
      "version": "0.6",
      "reqId": str(req_id),
      "table": self._ToJSonObj(columns_order, order_by),
      "status": "ok",
  }
  encoded = DataTableJSONEncoder().encode(payload)
  # Python 2 encoders produce unicode; the wrapper below expects str.
  if not isinstance(encoded, str):
    encoded = encoded.encode("utf-8")
  return "%s(%s);" % (response_handler, encoded)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
  """Writes the right response according to the request string passed in tqx.

  Parses the tqx request string (the format defined for implementing a
  Google Visualization data source) and dispatches on its "out" parameter
  to the matching output method: ToJSonResponse() for "json" (the default),
  ToHtml() for "html", ToCsv() for "csv" and ToTsvExcel() for "tsv-excel",
  passing that method the rest of the relevant request keys.

  Args:
    columns_order: Optional. Passed as is to the relevant response function.
    order_by: Optional. Passed as is to the relevant response function.
    tqx: Optional. The request string as received by HTTP GET, in the format
         "key1:value1;key2:value2...". Every key has a default value, so an
         empty string just calls ToJSonResponse() with no extra parameters.

  Returns:
    A response string, as returned by the relevant response function.

  Raises:
    DataTableException: One of the parameters passed in tqx is not supported.
  """
  tqx_dict = {}
  if tqx:
    # Split each option on the first ":" only, so values that themselves
    # contain colons (e.g. a namespaced responseHandler) survive parsing
    # instead of blowing up dict() construction.
    tqx_dict = dict(opt.split(":", 1) for opt in tqx.split(";"))
  if tqx_dict.get("version", "0.6") != "0.6":
    raise DataTableException(
        "Version (%s) passed by request is not supported."
        % tqx_dict["version"])

  out_format = tqx_dict.get("out", "json")
  if out_format == "json":
    response_handler = tqx_dict.get("responseHandler",
                                    "google.visualization.Query.setResponse")
    return self.ToJSonResponse(columns_order, order_by,
                               req_id=tqx_dict.get("reqId", 0),
                               response_handler=response_handler)
  elif out_format == "html":
    return self.ToHtml(columns_order, order_by)
  elif out_format == "csv":
    return self.ToCsv(columns_order, order_by)
  elif out_format == "tsv-excel":
    return self.ToTsvExcel(columns_order, order_by)
  else:
    raise DataTableException(
        "'out' parameter: '%s' is not supported" % out_format)
|
google/google-visualization-python
|
gviz_api.py
|
DataTable.TableDescriptionParser
|
python
|
def TableDescriptionParser(table_description, depth=0):
  """Parses a user-supplied table description into a flat list of columns.

  Each returned column is the dict produced by DataTable.ColumnTypeParser,
  augmented with:
    - "depth": nesting level of the column within the description.
    - "container": "scalar", "iter" or "dict" -- how values for this
      column are supplied at that level of the data.

  Args:
    table_description: A string/tuple column description, an iterable of
                       them, or a (possibly nested) dictionary.
    depth: Optional. The depth of the first level in the current
           description; set by the recursive call at the bottom.

  Returns:
    The flattened list of parsed column dictionaries.

  Raises:
    DataTableException: Error in a column description or in the
                        description structure.
  """
  # For the recursion step, we check for a scalar object (string or tuple)
  if isinstance(table_description, (six.string_types, tuple)):
    parsed_col = DataTable.ColumnTypeParser(table_description)
    parsed_col["depth"] = depth
    parsed_col["container"] = "scalar"
    return [parsed_col]

  # Since it is not scalar, table_description must be iterable.
  if not hasattr(table_description, "__iter__"):
    raise DataTableException("Expected an iterable object, got %s" %
                             type(table_description))
  if not isinstance(table_description, dict):
    # We expect a non-dictionary iterable item.
    columns = []
    for desc in table_description:
      parsed_col = DataTable.ColumnTypeParser(desc)
      parsed_col["depth"] = depth
      parsed_col["container"] = "iter"
      columns.append(parsed_col)
    if not columns:
      raise DataTableException("Description iterable objects should not"
                               " be empty.")
    return columns

  # The other case is a dictionary
  if not table_description:
    raise DataTableException("Empty dictionaries are not allowed inside"
                             " description")

  # To differentiate between the two cases of more levels below or this is
  # the most inner dictionary, we consider the number of keys (more than one
  # key is indication for most inner dictionary) and the type of the key and
  # value in case of only 1 key (if the type of key is string and the type of
  # the value is a tuple of 0-3 items, we assume this is the most inner
  # dictionary).
  # NOTE: this way of differentiating might create ambiguity. See docs.
  if (len(table_description) != 1 or
      (isinstance(next(six.iterkeys(table_description)), six.string_types) and
       isinstance(next(six.itervalues(table_description)), tuple) and
       len(next(six.itervalues(table_description))) < 4)):
    # This is the most inner dictionary. Parsing types.
    columns = []
    # We sort the items, equivalent to sort the keys since they are unique
    for key, value in sorted(table_description.items()):
      # We parse the column type as (key, type) or (key, type, label) using
      # ColumnTypeParser.
      if isinstance(value, tuple):
        parsed_col = DataTable.ColumnTypeParser((key,) + value)
      else:
        parsed_col = DataTable.ColumnTypeParser((key, value))
      parsed_col["depth"] = depth
      parsed_col["container"] = "dict"
      columns.append(parsed_col)
    return columns

  # This is an outer dictionary, must have at most one key.
  parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
  parsed_col["depth"] = depth
  parsed_col["container"] = "dict"
  return ([parsed_col] + DataTable.TableDescriptionParser(
      sorted(table_description.values())[0], depth=depth + 1))
|
Parses the table_description object for internal use.
Parses the user-submitted table description into an internal format used
by the Python DataTable class. Returns the flat list of parsed columns.
Args:
table_description: A description of the table which should comply
with one of the formats described below.
depth: Optional. The depth of the first level in the current description.
Used by recursive calls to this function.
Returns:
List of columns, where each column represented by a dictionary with the
keys: id, label, type, depth, container which means the following:
- id: the id of the column
- name: The name of the column
- type: The datatype of the elements in this column. Allowed types are
described in ColumnTypeParser().
- depth: The depth of this column in the table description
- container: 'dict', 'iter' or 'scalar' for parsing the format easily.
- custom_properties: The custom properties for this column.
The returned description is flattened regardless of how it was given.
Raises:
DataTableException: Error in a column description or in the description
structure.
Examples:
A column description can be of the following forms:
'id'
('id',)
('id', 'type')
('id', 'type', 'label')
('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
or as a dictionary:
'id': 'type'
'id': ('type',)
'id': ('type', 'label')
'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
If the type is not specified, we treat it as string.
If no specific label is given, the label is simply the id.
If no custom properties are given, we use an empty dictionary.
input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
output: [{'id': 'a', 'label': 'a', 'type': 'date',
'depth': 0, 'container': 'iter', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'timeofday',
'depth': 0, 'container': 'iter',
'custom_properties': {'foo': 'bar'}}]
input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
output: [{'id': 'a', 'label': 'a', 'type': 'string',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'number',
'depth': 1, 'container': 'iter', 'custom_properties': {}},
{'id': 'c', 'label': 'column c', 'type': 'string',
'depth': 1, 'container': 'iter', 'custom_properties': {}}]
input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
output: [{'id': 'a', 'label': 'column a', 'type': 'number',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'number',
'depth': 1, 'container': 'dict', 'custom_properties': {}},
{'id': 'c', 'label': 'c', 'type': 'string',
'depth': 1, 'container': 'dict', 'custom_properties': {}}]
input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
output: [{'id': 'w', 'label': 'word', 'type': 'string',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'c', 'label': 'count', 'type': 'number',
'depth': 1, 'container': 'scalar', 'custom_properties': {}}]
input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
'container': 'dict', 'custom_properties': {}}
NOTE: there might be ambiguity in the case of a dictionary representation
of a single column. For example, the following description can be parsed
in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
'a', and the other named 'b' of type 'c'. We choose the first option by
default, and in case the second option is the right one, it is possible to
make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
-- second 'b' is the label, and {} is the custom properties field.
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L378-L525
|
[
"def ColumnTypeParser(description):\n \"\"\"Parses a single column description. Internal helper method.\n\n Args:\n description: a column description in the possible formats:\n 'id'\n ('id',)\n ('id', 'type')\n ('id', 'type', 'label')\n ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})\n Returns:\n Dictionary with the following keys: id, label, type, and\n custom_properties where:\n - If label not given, it equals the id.\n - If type not given, string is used by default.\n - If custom properties are not given, an empty dictionary is used by\n default.\n\n Raises:\n DataTableException: The column description did not match the RE, or\n unsupported type was passed.\n \"\"\"\n if not description:\n raise DataTableException(\"Description error: empty description given\")\n\n if not isinstance(description, (six.string_types, tuple)):\n raise DataTableException(\"Description error: expected either string or \"\n \"tuple, got %s.\" % type(description))\n\n if isinstance(description, six.string_types):\n description = (description,)\n\n # According to the tuple's length, we fill the keys\n # We verify everything is of type string\n for elem in description[:3]:\n if not isinstance(elem, six.string_types):\n raise DataTableException(\"Description error: expected tuple of \"\n \"strings, current element of type %s.\" %\n type(elem))\n desc_dict = {\"id\": description[0],\n \"label\": description[0],\n \"type\": \"string\",\n \"custom_properties\": {}}\n if len(description) > 1:\n desc_dict[\"type\"] = description[1].lower()\n if len(description) > 2:\n desc_dict[\"label\"] = description[2]\n if len(description) > 3:\n if not isinstance(description[3], dict):\n raise DataTableException(\"Description error: expected custom \"\n \"properties of type dict, current element \"\n \"of type %s.\" % type(description[3]))\n desc_dict[\"custom_properties\"] = description[3]\n if len(description) > 4:\n raise DataTableException(\"Description error: tuple of length > 4\")\n if 
desc_dict[\"type\"] not in [\"string\", \"number\", \"boolean\",\n \"date\", \"datetime\", \"timeofday\"]:\n raise DataTableException(\n \"Description error: unsupported type '%s'\" % desc_dict[\"type\"])\n return desc_dict\n"
] |
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
  """Initialize the data table from a table schema and (optionally) data.

  See the class documentation for more information on table schema and data
  values.

  Args:
    table_description: A table schema, following one of the formats
                       accepted by TableDescriptionParser(). Schemas
                       describe the column names, data types, and labels.
    data: Optional. If given, fills the table with the given data; the
          structure must be consistent with the schema. More rows can be
          added later by calling AppendData().
    custom_properties: Optional. A string-to-string dictionary stored as
                       the table's custom properties. Can be changed later
                       through self.custom_properties.

  Raises:
    DataTableException: Raised if the data and the description did not
                        match, or did not use the supported formats.
  """
  self.__columns = self.TableDescriptionParser(table_description)
  self.__data = []
  # Keep the caller's dictionary object when one is supplied, so later
  # external mutations of it remain visible here.
  self.custom_properties = {} if custom_properties is None else custom_properties
  if data:
    self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
  """Coerces a single value into the type expected for its column.

  Internal helper method.

  Args:
    value: The value to convert, or a tuple of
           (value, formatted value[, custom properties]) where the
           formatted value is a string or None and custom properties is a
           dict. Only the raw value is coerced; the rest is passed through
           (custom properties are handled by the ToJSon/ToJSCode methods).
    value_type: One of "string", "number", "boolean", "date", "datetime"
                or "timeofday".

  Returns:
    An item of the Python type appropriate to value_type; byte strings are
    decoded to text using UTF-8. None is returned unchanged (a null cell).
    The real type is not strictly checked -- e.g. any object is accepted
    for "string" via its text conversion, and "boolean" just truth-tests.

  Raises:
    DataTableException: The value and type did not match in a
                        non-recoverable way, e.g. 'abc' for type 'number'.
  """
  if isinstance(value, tuple):
    # Validate the (value, formatted[, props]) shape, then coerce only the
    # raw value and re-attach the remaining elements.
    if (len(value) not in [2, 3] or
        (len(value) == 3 and not isinstance(value[2], dict))):
      raise DataTableException("Wrong format for value and formatting - %s." %
                               str(value))
    if not isinstance(value[1], six.string_types + (type(None),)):
      raise DataTableException("Formatted value is not string, given %s." %
                               type(value[1]))
    coerced = DataTable.CoerceValue(value[0], value_type)
    return (coerced,) + value[1:]

  actual_type = type(value)
  if value is None:
    # None always denotes a null cell, whatever the column type.
    return None

  if value_type == "boolean":
    return bool(value)
  if value_type == "number":
    if isinstance(value, six.integer_types + (float,)):
      return value
    raise DataTableException("Wrong type %s when expected number" %
                             actual_type)
  if value_type == "string":
    if isinstance(value, six.text_type):
      return value
    if isinstance(value, bytes):
      return six.text_type(value, encoding="utf-8")
    return six.text_type(value)
  if value_type == "date":
    # datetime is a date subclass, so check it first and truncate.
    if isinstance(value, datetime.datetime):
      return datetime.date(value.year, value.month, value.day)
    if isinstance(value, datetime.date):
      return value
    raise DataTableException("Wrong type %s when expected date" % actual_type)
  if value_type == "timeofday":
    if isinstance(value, datetime.datetime):
      return datetime.time(value.hour, value.minute, value.second)
    if isinstance(value, datetime.time):
      return value
    raise DataTableException("Wrong type %s when expected time" % actual_type)
  if value_type == "datetime":
    if isinstance(value, datetime.datetime):
      return value
    raise DataTableException("Wrong type %s when expected datetime" %
                             actual_type)

  # Reaching here means value_type was not one of the supported types.
  raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
  """Formats a single cell value for the plain-text (CSV/TSV/HTML) outputs.

  None renders as "(empty)"; dates/times use their default str() form;
  booleans are lowercased to match JS; byte strings are decoded as UTF-8;
  anything else goes through the text conversion.
  """
  if value is None:
    return "(empty)"
  if isinstance(value, (datetime.datetime,
                        datetime.date,
                        datetime.time)):
    return str(value)
  if isinstance(value, six.text_type):
    return value
  if isinstance(value, bool):
    return str(value).lower()
  if isinstance(value, bytes):
    return six.text_type(value, encoding="utf-8")
  return six.text_type(value)
@staticmethod
def ColumnTypeParser(description):
  """Parses a single column description. Internal helper method.

  Accepts a bare id string or a tuple of up to four elements:
  (id[, type[, label[, custom_properties]]]). Missing pieces get
  defaults: type "string", label equal to the id, and empty custom
  properties.

  Args:
    description: The column description string or tuple.

  Returns:
    A dict with the keys "id", "label", "type" and "custom_properties".

  Raises:
    DataTableException: The description is malformed or names an
                        unsupported type.
  """
  if not description:
    raise DataTableException("Description error: empty description given")

  if not isinstance(description, (six.string_types, tuple)):
    raise DataTableException("Description error: expected either string or "
                             "tuple, got %s." % type(description))

  # Normalize a bare id to the 1-tuple form so one code path handles both.
  if isinstance(description, six.string_types):
    description = (description,)

  # The first three slots (id, type, label) must all be strings.
  for elem in description[:3]:
    if not isinstance(elem, six.string_types):
      raise DataTableException("Description error: expected tuple of "
                               "strings, current element of type %s." %
                               type(elem))

  parsed = {"id": description[0],
            "label": description[0],
            "type": "string",
            "custom_properties": {}}
  if len(description) > 1:
    parsed["type"] = description[1].lower()
  if len(description) > 2:
    parsed["label"] = description[2]
  if len(description) > 3:
    if not isinstance(description[3], dict):
      raise DataTableException("Description error: expected custom "
                               "properties of type dict, current element "
                               "of type %s." % type(description[3]))
    parsed["custom_properties"] = description[3]
  if len(description) > 4:
    raise DataTableException("Description error: tuple of length > 4")
  if parsed["type"] not in ["string", "number", "boolean",
                            "date", "datetime", "timeofday"]:
    raise DataTableException(
        "Description error: unsupported type '%s'" % parsed["type"])
  return parsed
@staticmethod
@property
def columns(self):
  """Returns the parsed table description."""
  # Read-only view of the column dicts built by TableDescriptionParser.
  return self.__columns
def NumberOfRows(self):
  """Returns the number of rows in the current data stored in the table."""
  # Each entry in self.__data is one (row values, custom properties) pair.
  return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
  """Sets the custom properties for given row(s).

  Can accept a single row index or an iterable of indices; the same custom
  properties dictionary is attached to every specified row.

  Args:
    rows: The row index, or iterable of row indices, to update.
    custom_properties: A string-to-string dictionary of custom properties
                       to set for all the given rows.
  """
  # Normalize a single index to a one-element list.
  targets = rows if hasattr(rows, "__iter__") else [rows]
  for index in targets:
    values, _ = self.__data[index]
    self.__data[index] = (values, custom_properties)
def LoadData(self, data, custom_properties=None):
  """Loads new rows to the data table, clearing existing rows.

  May also set the custom_properties for the added rows. The given custom
  properties dictionary specifies the dictionary that will be used for *all*
  given rows.

  Args:
    data: The rows that the table will contain.
    custom_properties: A dictionary of string to string to set as the custom
                       properties for all rows.
  """
  # Rebind to a fresh list (rather than clearing in place), then delegate
  # the actual insertion and validation to AppendData.
  self.__data = []
  self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
  """Appends new data to the table.

  Data is appended in rows and must comply with the table schema passed to
  __init__(). See CoerceValue() for acceptable data types, and the class
  documentation for examples of schema and data values.

  Args:
    data: The rows to add to the table, conforming to the table
          description format.
    custom_properties: A string-to-string dictionary of custom properties
                       to attach to all the added rows.

  Raises:
    DataTableException: The data structure does not match the description.
  """
  # A flat schema (maximal depth 0) means each element of `data` is one
  # complete row; feed them to _InnerAppendData one at a time. Otherwise
  # _InnerAppendData walks the nested structure itself.
  if not self.__columns[-1]["depth"]:
    for row in data:
      self._InnerAppendData(({}, custom_properties), row, 0)
    return
  self._InnerAppendData(({}, custom_properties), data, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
  """Inner function to assist LoadData.

  Recursively walks `data` following the parsed column descriptions,
  accumulating cell values into prev_col_values and appending completed
  rows to self.__data.

  Args:
    prev_col_values: Tuple of (dict of column id -> value collected so far,
                     custom properties for the row being built).
    data: The remaining (possibly nested) data for columns from col_index
          onward.
    col_index: Index into self.__columns of the next column to fill.

  Raises:
    DataTableException: The data structure does not match the description.
  """
  # We first check that col_index has not exceeded the columns size
  if col_index >= len(self.__columns):
    raise DataTableException("The data does not match description, too deep")

  # Dealing with the scalar case, the data is the last value.
  if self.__columns[col_index]["container"] == "scalar":
    prev_col_values[0][self.__columns[col_index]["id"]] = data
    self.__data.append(prev_col_values)
    return

  if self.__columns[col_index]["container"] == "iter":
    if not hasattr(data, "__iter__") or isinstance(data, dict):
      raise DataTableException("Expected iterable object, got %s" %
                               type(data))
    # We only need to insert the rest of the columns
    # If there are less items than expected, we only add what there is.
    for value in data:
      if col_index >= len(self.__columns):
        raise DataTableException("Too many elements given in data")
      prev_col_values[0][self.__columns[col_index]["id"]] = value
      col_index += 1
    self.__data.append(prev_col_values)
    return

  # We know the current level is a dictionary, we verify the type.
  if not isinstance(data, dict):
    raise DataTableException("Expected dictionary at current level, got %s" %
                             type(data))
  # We check if this is the last level
  if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
    # We need to add the keys in the dictionary as they are
    for col in self.__columns[col_index:]:
      if col["id"] in data:
        prev_col_values[0][col["id"]] = data[col["id"]]
    self.__data.append(prev_col_values)
    return

  # We have a dictionary in an inner depth level.
  if not data.keys():
    # In case this is an empty dictionary, we add a record with the columns
    # filled only until this point.
    self.__data.append(prev_col_values)
  else:
    for key in sorted(data):
      # Each key becomes this column's value; recurse for the deeper
      # columns with a copy of the values collected so far.
      col_values = dict(prev_col_values[0])
      col_values[self.__columns[col_index]["id"]] = key
      self._InnerAppendData((col_values, prev_col_values[1]),
                            data[key], col_index + 1)
def _PreparedData(self, order_by=()):
  """Prepares the data for enumeration - sorting it by order_by.

  Args:
    order_by: Optional. Specifies the name of the column(s) to sort by, and
              (optionally) which direction to sort in. Default sort direction
              is asc. Following formats are accepted:
              "string_col_name" -- For a single key in default (asc) order.
              ("string_col_name", "asc|desc") -- For a single key.
              [("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
              one column, an array of tuples of (col_name, "asc|desc").

  Returns:
    The data sorted by the keys given.

  Raises:
    DataTableException: Sort direction not in 'asc' or 'desc'
  """
  if not order_by:
    return self.__data

  # Sort a copy so self.__data keeps its insertion order.
  sorted_data = self.__data[:]
  if isinstance(order_by, six.string_types) or (
      isinstance(order_by, tuple) and len(order_by) == 2 and
      order_by[1].lower() in ["asc", "desc"]):
    # A single key was given -- normalize to a one-element sequence.
    order_by = (order_by,)
  # Sorting by the least significant key first; since list.sort is stable,
  # later (more significant) keys take precedence.
  for key in reversed(order_by):
    if isinstance(key, six.string_types):
      sorted_data.sort(key=lambda x: x[0].get(key))
    elif (isinstance(key, (list, tuple)) and len(key) == 2 and
          key[1].lower() in ("asc", "desc")):
      key_func = lambda x: x[0].get(key[0])
      sorted_data.sort(key=key_func, reverse=key[1].lower() != "asc")
    else:
      raise DataTableException("Expected tuple with second value: "
                               "'asc' or 'desc'")

  return sorted_data
def ToJSCode(self, name, columns_order=None, order_by=()):
  """Writes the data table as a JS code string.

  This method writes a string of JS code that can be run to
  generate a DataTable with the specified data. Typically used for debugging
  only.

  Args:
    name: The name of the table. The name would be used as the DataTable's
          variable name in the created JS code.
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the order
                   in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.

  Returns:
    A string of JS code that, when run, generates a DataTable with the given
    name and the data stored in the DataTable object.

    Example result:
      "var tab1 = new google.visualization.DataTable();
       tab1.addColumn("string", "a", "a");
       tab1.addColumn("number", "b", "b");
       tab1.addColumn("boolean", "c", "c");
       tab1.addRows(10);
       tab1.setCell(0, 0, "a");
       tab1.setCell(0, 1, 1, null, {"foo": "bar"});
       tab1.setCell(0, 2, true);
       ...
       tab1.setCell(9, 0, "c");
       tab1.setCell(9, 1, 3, "3$");
       tab1.setCell(9, 2, false);"

  Raises:
    DataTableException: The data does not match the type.
  """
  encoder = DataTableJSONEncoder()
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  # Index the parsed column descriptions by id for O(1) lookups below.
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # We first create the table with the given name
  jscode = "var %s = new google.visualization.DataTable();\n" % name
  if self.custom_properties:
    jscode += "%s.setTableProperties(%s);\n" % (
        name, encoder.encode(self.custom_properties))

  # We add the columns to the table
  for i, col in enumerate(columns_order):
    jscode += "%s.addColumn(%s, %s, %s);\n" % (
        name,
        encoder.encode(col_dict[col]["type"]),
        encoder.encode(col_dict[col]["label"]),
        encoder.encode(col_dict[col]["id"]))
    if col_dict[col]["custom_properties"]:
      jscode += "%s.setColumnProperties(%d, %s);\n" % (
          name, i, encoder.encode(col_dict[col]["custom_properties"]))
  jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

  # We now go over the data and add each row
  for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
    # We add all the elements of this row by their order
    for (j, col) in enumerate(columns_order):
      # Absent/None cells are simply skipped; addRows() above already
      # reserved the row, so the cell stays null on the JS side.
      if col not in row or row[col] is None:
        continue
      value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        cell_cp = ""
        if len(value) == 3:
          cell_cp = ", %s" % encoder.encode(row[col][2])
        # We have a formatted value or custom property as well
        jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                   (name, i, j,
                    self.EscapeForJSCode(encoder, value[0]),
                    self.EscapeForJSCode(encoder, value[1]), cell_cp))
      else:
        jscode += "%s.setCell(%d, %d, %s);\n" % (
            name, i, j, self.EscapeForJSCode(encoder, value))
    if cp:
      jscode += "%s.setRowProperties(%d, %s);\n" % (
          name, i, encoder.encode(cp))
  return jscode
def ToHtml(self, columns_order=None, order_by=()):
  """Writes the data table as an HTML table code string.

  Args:
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the order
                   in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.

  Returns:
    An HTML table code string.
    Example result (the result is without the newlines):
     <html><body><table border="1">
      <thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
      <tbody>
       <tr><td>1</td><td>"z"</td><td>2</td></tr>
       <tr><td>"3$"</td><td>"w"</td><td></td></tr>
      </tbody>
     </table></body></html>

  Raises:
    DataTableException: The data does not match the type.
  """
  table_template = "<html><body><table border=\"1\">%s</table></body></html>"
  columns_template = "<thead><tr>%s</tr></thead>"
  rows_template = "<tbody>%s</tbody>"
  row_template = "<tr>%s</tr>"
  header_cell_template = "<th>%s</th>"
  cell_template = "<td>%s</td>"

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Header row: labels are HTML-escaped to keep the markup well-formed.
  columns_list = []
  for col in columns_order:
    columns_list.append(header_cell_template %
                        html.escape(col_dict[col]["label"]))
  columns_html = columns_template % "".join(columns_list)

  rows_list = []
  # We now go over the data and add each row
  for row, unused_cp in self._PreparedData(order_by):
    cells_list = []
    # We add all the elements of this row by their order
    for col in columns_order:
      # For empty string we want empty quotes ("").
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # We have a formatted value and we're going to use it
        cells_list.append(cell_template % html.escape(self.ToString(value[1])))
      else:
        cells_list.append(cell_template % html.escape(self.ToString(value)))
    rows_list.append(row_template % "".join(cells_list))
  rows_html = rows_template % "".join(rows_list)
  return table_template % (columns_html + rows_html)
def ToCsv(self, columns_order=None, order_by=(), separator=","):
  """Writes the data table as a CSV string.

  NOTE(review): the original docstring claimed the output is UTF-8 encoded;
  on Python 3 `ensure_str` below returns `str` unchanged, so the result is
  a text string — confirm against callers before changing the docs.

  Args:
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the order
                   in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.
    separator: Optional. The separator to use between the values.

  Returns:
    A CSV string representing the table.
    Example result:
     'a','b','c'
     1,'z',2
     3,'w',''

  Raises:
    DataTableException: The data does not match the type.
  """
  csv_buffer = six.StringIO()
  writer = csv.writer(csv_buffer, delimiter=separator)

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  def ensure_str(s):
    "Compatibility function. Ensures using of str rather than unicode."
    if isinstance(s, str):
      return s
    return s.encode("utf-8")

  # Header row with the column labels, in the requested order.
  writer.writerow([ensure_str(col_dict[col]["label"])
                   for col in columns_order])

  # We now go over the data and add each row
  for row, unused_cp in self._PreparedData(order_by):
    cells_list = []
    # We add all the elements of this row by their order
    for col in columns_order:
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # We have a formatted value. Using it only for date/time types.
        if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
          cells_list.append(ensure_str(self.ToString(value[1])))
        else:
          cells_list.append(ensure_str(self.ToString(value[0])))
      else:
        cells_list.append(ensure_str(self.ToString(value)))
    writer.writerow(cells_list)
  return csv_buffer.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
  """Renders the table as tab-separated values readable by MS Excel.

  Delegates to ToCsv() with a tab separator and re-encodes the result as
  little-endian UTF-16, which Excel opens natively.

  Args:
    columns_order: Delegated to ToCsv.
    order_by: Delegated to ToCsv.

  Returns:
    A tab-separated little endian UTF16 byte string representing the table.
  """
  tsv = self.ToCsv(columns_order, order_by, separator="\t")
  if isinstance(tsv, six.text_type):
    text = tsv
  else:
    text = tsv.decode("utf-8")
  return text.encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
  """Returns an object suitable to be converted to JSON.

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
                   you want them created in the output table. If specified,
                   all column IDs must be present.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData().

  Returns:
    A dictionary object for use by ToJSon or ToJSonResponse. The short keys
    ("v", "f", "p", "c") follow the Google Visualization DataTable JSON
    format: value, formatted value, custom properties, and cells.
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Creating the column JSON objects
  col_objs = []
  for col_id in columns_order:
    col_obj = {"id": col_dict[col_id]["id"],
               "label": col_dict[col_id]["label"],
               "type": col_dict[col_id]["type"]}
    if col_dict[col_id]["custom_properties"]:
      col_obj["p"] = col_dict[col_id]["custom_properties"]
    col_objs.append(col_obj)

  # Creating the rows jsons
  row_objs = []
  for row, cp in self._PreparedData(order_by):
    cell_objs = []
    for col in columns_order:
      value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
      if value is None:
        # Missing/None cells are emitted as JSON null.
        cell_obj = None
      elif isinstance(value, tuple):
        cell_obj = {"v": value[0]}
        if len(value) > 1 and value[1] is not None:
          cell_obj["f"] = value[1]
        if len(value) == 3:
          cell_obj["p"] = value[2]
      else:
        cell_obj = {"v": value}
      cell_objs.append(cell_obj)
    row_obj = {"c": cell_objs}
    if cp:
      row_obj["p"] = cp
    row_objs.append(row_obj)

  json_obj = {"cols": col_objs, "rows": row_objs}
  if self.custom_properties:
    json_obj["p"] = self.custom_properties
  return json_obj
def ToJSon(self, columns_order=None, order_by=()):
  """Serializes the table to the JSON string a JS DataTable accepts.

  The result can be passed directly into the
  google.visualization.DataTable constructor (protocol version 0.6) on a
  page that hosts the visualization, e.g.:

    var data = new google.visualization.DataTable(_my_JSon_string, 0.6);

  Args:
    columns_order: Optional. Specifies the order of columns in the output
        table; when given it must list every column ID.
    order_by: Optional. Specifies the name of the column(s) to sort by.
        Passed as is to _PreparedData().

  Returns:
    A JSON constructor string, e.g.
    '{cols: [...], rows: [...], p: {...}}', describing this table's
    columns, rows and custom properties.

  Raises:
    DataTableException: The data does not match the type.
  """
  json_text = DataTableJSONEncoder().encode(
      self._ToJSonObj(columns_order, order_by))
  # json.JSONEncoder.encode() yields str on Python 3; the fallback keeps
  # the historical behavior of returning UTF-8 bytes otherwise.
  if isinstance(json_text, str):
    return json_text
  return json_text.encode("utf-8")
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
  """Renders the table as a complete JSON query response for a client.

  Builds the response object mandated by the Google Visualization wire
  protocol (version 0.6) and wraps it in a call to *response_handler*, so
  the returned string can be served as-is to a visualization Query object,
  which translates it into a DataTable on the client side.

  Args:
    columns_order: Optional. Passed straight to self.ToJSon().
    order_by: Optional. Passed straight to self.ToJSon().
    req_id: Optional. The response id, as retrieved by the request.
    response_handler: Optional. The response handler, as retrieved by the
        request.

  Returns:
    A string such as (newlines added for readability):
      google.visualization.Query.setResponse({
          'version':'0.6', 'reqId':'0', 'status':'OK',
          'table': {cols: [...], rows: [...]}});
    The URL returning this string can be used as a data source by Google
    Visualization Gadgets or from JS code.
  """
  response_obj = {
      "version": "0.6",
      "reqId": str(req_id),
      "table": self._ToJSonObj(columns_order, order_by),
      "status": "ok",
  }
  payload = DataTableJSONEncoder().encode(response_obj)
  if not isinstance(payload, str):
    payload = payload.encode("utf-8")
  return "{}({});".format(response_handler, payload)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
  """Writes the right response according to the request string passed in tqx.

  Parses the tqx request string (format defined by the Google Visualization
  data source protocol), then dispatches on its "out" parameter: "json" ->
  ToJSonResponse(), "html" -> ToHtml(), "csv" -> ToCsv(),
  "tsv-excel" -> ToTsvExcel(). Every key has a default, so an empty tqx
  simply calls ToJSonResponse() with no extra parameters.

  Args:
    columns_order: Optional. Passed as is to the relevant response function.
    order_by: Optional. Passed as is to the relevant response function.
    tqx: Optional. The request string as received by HTTP GET, in the
        format "key1:value1;key2:value2...".

  Returns:
    A response string, as returned by the relevant response function.

  Raises:
    DataTableException: One of the parameters passed in tqx is not supported.
  """
  tqx_dict = {}
  if tqx:
    tqx_dict = dict(opt.split(":") for opt in tqx.split(";"))
  version = tqx_dict.get("version", "0.6")
  if version != "0.6":
    raise DataTableException(
        "Version (%s) passed by request is not supported." % version)
  out = tqx_dict.get("out", "json")
  if out == "json":
    handler = tqx_dict.get("responseHandler",
                           "google.visualization.Query.setResponse")
    return self.ToJSonResponse(columns_order, order_by,
                               req_id=tqx_dict.get("reqId", 0),
                               response_handler=handler)
  if out == "html":
    return self.ToHtml(columns_order, order_by)
  if out == "csv":
    return self.ToCsv(columns_order, order_by)
  if out == "tsv-excel":
    return self.ToTsvExcel(columns_order, order_by)
  raise DataTableException(
      "'out' parameter: '%s' is not supported" % out)
|
google/google-visualization-python
|
gviz_api.py
|
DataTable.SetRowsCustomProperties
|
python
|
def SetRowsCustomProperties(self, rows, custom_properties):
if not hasattr(rows, "__iter__"):
rows = [rows]
for row in rows:
self.__data[row] = (self.__data[row][0], custom_properties)
|
Sets the custom properties for given row(s).
Can accept a single row or an iterable of rows.
Sets the given custom properties for all specified rows.
Args:
rows: The row, or rows, to set the custom properties for.
custom_properties: A string to string dictionary of custom properties to
set for all rows.
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L536-L550
| null |
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
  """Builds a DataTable from a schema and, optionally, initial data.

  See the class documentation for more information on table schema and data
  values.

  Args:
    table_description: A table schema in any format accepted by
        TableDescriptionParser(); it fixes the column names, types and
        labels for the table's lifetime.
    data: Optional. Initial rows, loaded via LoadData(). The structure must
        be consistent with the schema in table_description; more data can
        be added later with AppendData().
    custom_properties: Optional. A string-to-string dictionary stored as
        the table's custom properties (can also be changed later through
        self.custom_properties).

  Raises:
    DataTableException: Raised if the data and the description did not
        match, or did not use the supported formats.
  """
  self.__columns = self.TableDescriptionParser(table_description)
  self.__data = []
  if custom_properties is None:
    self.custom_properties = {}
  else:
    self.custom_properties = custom_properties
  if data:
    self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
  """Coerces a single value into the type expected for its column.

  Internal helper method.

  Args:
    value: The value which should be converted
    value_type: One of "string", "number", "boolean", "date", "datetime" or
                "timeofday".

  Returns:
    An item of the Python type appropriate to the given value_type. Strings
    are also converted to Unicode using UTF-8 encoding if necessary.

    If a tuple is given, it should be in one of the following forms:
      - (value, formatted value)
      - (value, formatted value, custom properties)
    where the formatted value is a string, and custom properties is a
    dictionary of the custom properties for this cell.
    To specify custom properties without specifying formatted value, one can
    pass None as the formatted value.
    One can also have a null-valued cell with formatted value and/or custom
    properties by specifying None for the value.
    This method ignores the custom properties except for checking that it is a
    dictionary. The custom properties are handled in the ToJSon and ToJSCode
    methods.

    The real type of the given value is not strictly checked. For example,
    any type can be used for string - as we simply take its str( ) and for
    boolean value we just check "if value".

    Examples:
      CoerceValue(None, "string") returns None
      CoerceValue((5, "5$"), "number") returns (5, "5$")
      CoerceValue(100, "string") returns "100"
      CoerceValue(0, "boolean") returns False

  Raises:
    DataTableException: The value and type did not match in a not-recoverable
                        way, for example given value 'abc' for type 'number'.
  """
  if isinstance(value, tuple):
    # In case of a tuple, we run the same function on the value itself and
    # add the formatted value.
    if (len(value) not in [2, 3] or
        (len(value) == 3 and not isinstance(value[2], dict))):
      raise DataTableException("Wrong format for value and formatting - %s." %
                               str(value))
    if not isinstance(value[1], six.string_types + (type(None),)):
      raise DataTableException("Formatted value is not string, given %s." %
                               type(value[1]))
    # Recurse on the raw value only; formatted value / properties pass
    # through unchanged.
    js_value = DataTable.CoerceValue(value[0], value_type)
    return (js_value,) + value[1:]

  t_value = type(value)
  # None is a valid "null" cell for every column type.
  if value is None:
    return value
  if value_type == "boolean":
    return bool(value)

  elif value_type == "number":
    if isinstance(value, six.integer_types + (float,)):
      return value
    raise DataTableException("Wrong type %s when expected number" % t_value)

  elif value_type == "string":
    if isinstance(value, six.text_type):
      return value
    if isinstance(value, bytes):
      return six.text_type(value, encoding="utf-8")
    else:
      return six.text_type(value)

  elif value_type == "date":
    # datetime is checked before date because it is a date subclass;
    # the time component is deliberately dropped.
    if isinstance(value, datetime.datetime):
      return datetime.date(value.year, value.month, value.day)
    elif isinstance(value, datetime.date):
      return value
    else:
      raise DataTableException("Wrong type %s when expected date" % t_value)

  elif value_type == "timeofday":
    # Symmetrically, only the time component of a datetime is kept.
    if isinstance(value, datetime.datetime):
      return datetime.time(value.hour, value.minute, value.second)
    elif isinstance(value, datetime.time):
      return value
    else:
      raise DataTableException("Wrong type %s when expected time" % t_value)

  elif value_type == "datetime":
    if isinstance(value, datetime.datetime):
      return value
    else:
      raise DataTableException("Wrong type %s when expected datetime" %
                               t_value)
  # If we got here, it means the given value_type was not one of the
  # supported types.
  raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
  """Returns a human-readable text rendering of *value*.

  None renders as "(empty)", booleans as lowercase true/false, bytes are
  decoded as UTF-8, and date/time values use their standard str() form;
  everything else is converted to text.
  """
  if value is None:
    return "(empty)"
  if isinstance(value, (datetime.datetime, datetime.date, datetime.time)):
    return str(value)
  if isinstance(value, six.text_type):
    return value
  if isinstance(value, bool):
    return str(value).lower()
  if isinstance(value, bytes):
    return six.text_type(value, encoding="utf-8")
  return six.text_type(value)
@staticmethod
def ColumnTypeParser(description):
  """Parses a single column description. Internal helper method.

  Args:
    description: a column description in the possible formats:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})

  Returns:
    Dictionary with the following keys: id, label, type, and
    custom_properties where:
      - If label not given, it equals the id.
      - If type not given, string is used by default.
      - If custom properties are not given, an empty dictionary is used by
        default.

  Raises:
    DataTableException: The column description did not match the RE, or
        unsupported type was passed.
  """
  if not description:
    raise DataTableException("Description error: empty description given")

  if not isinstance(description, (six.string_types, tuple)):
    raise DataTableException("Description error: expected either string or "
                             "tuple, got %s." % type(description))

  # A bare string is shorthand for a one-element tuple (id only).
  if isinstance(description, six.string_types):
    description = (description,)

  # According to the tuple's length, we fill the keys
  # We verify everything is of type string
  for elem in description[:3]:
    if not isinstance(elem, six.string_types):
      raise DataTableException("Description error: expected tuple of "
                               "strings, current element of type %s." %
                               type(elem))
  desc_dict = {"id": description[0],
               "label": description[0],
               "type": "string",
               "custom_properties": {}}
  if len(description) > 1:
    desc_dict["type"] = description[1].lower()
    if len(description) > 2:
      desc_dict["label"] = description[2]
      if len(description) > 3:
        if not isinstance(description[3], dict):
          raise DataTableException("Description error: expected custom "
                                   "properties of type dict, current element "
                                   "of type %s." % type(description[3]))
        desc_dict["custom_properties"] = description[3]
        if len(description) > 4:
          raise DataTableException("Description error: tuple of length > 4")
  if desc_dict["type"] not in ["string", "number", "boolean",
                               "date", "datetime", "timeofday"]:
    raise DataTableException(
        "Description error: unsupported type '%s'" % desc_dict["type"])
  return desc_dict
@staticmethod
def TableDescriptionParser(table_description, depth=0):
  """Parses the table_description object for internal use.

  Parses the user-submitted table description into an internal format used
  by the Python DataTable class. Returns the flat list of parsed columns.

  Args:
    table_description: A description of the table which should comply
                       with one of the formats described below.
    depth: Optional. The depth of the first level in the current description.
           Used by recursive calls to this function.

  Returns:
    List of columns, where each column represented by a dictionary with the
    keys: id, label, type, depth, container which means the following:
    - id: the id of the column
    - name: The name of the column
    - type: The datatype of the elements in this column. Allowed types are
            described in ColumnTypeParser().
    - depth: The depth of this column in the table description
    - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
    - custom_properties: The custom properties for this column.
    The returned description is flattened regardless of how it was given.

  Raises:
    DataTableException: Error in a column description or in the description
                        structure.

  Examples:
    A column description can be of the following forms:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
     or as a dictionary:
     'id': 'type'
     'id': ('type',)
     'id': ('type', 'label')
     'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
    If the type is not specified, we treat it as string.
    If no specific label is given, the label is simply the id.
    If no custom properties are given, we use an empty dictionary.

    input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
    output: [{'id': 'a', 'label': 'a', 'type': 'date',
              'depth': 0, 'container': 'iter', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'timeofday',
              'depth': 0, 'container': 'iter',
              'custom_properties': {'foo': 'bar'}}]

    input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
    output: [{'id': 'a', 'label': 'a', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'iter', 'custom_properties': {}},
             {'id': 'c', 'label': 'column c', 'type': 'string',
              'depth': 1, 'container': 'iter', 'custom_properties': {}}]

    input:  {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'c', 'type': 'string',
              'depth': 1, 'container': 'dict', 'custom_properties': {}}]

    input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
    output: [{'id': 'w', 'label': 'word', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'count', 'type': 'number',
              'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

    input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
             'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
             'container': 'dict', 'custom_properties': {}}

    NOTE: there might be ambiguity in the case of a dictionary representation
    of a single column. For example, the following description can be parsed
    in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
    with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
    'a', and the other named 'b' of type 'c'. We choose the first option by
    default, and in case the second option is the right one, it is possible to
    make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
    into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
    -- second 'b' is the label, and {} is the custom properties field.
  """
  # For the recursion step, we check for a scalar object (string or tuple)
  if isinstance(table_description, (six.string_types, tuple)):
    parsed_col = DataTable.ColumnTypeParser(table_description)
    parsed_col["depth"] = depth
    parsed_col["container"] = "scalar"
    return [parsed_col]

  # Since it is not scalar, table_description must be iterable.
  if not hasattr(table_description, "__iter__"):
    raise DataTableException("Expected an iterable object, got %s" %
                             type(table_description))
  if not isinstance(table_description, dict):
    # We expects a non-dictionary iterable item.
    columns = []
    for desc in table_description:
      parsed_col = DataTable.ColumnTypeParser(desc)
      parsed_col["depth"] = depth
      parsed_col["container"] = "iter"
      columns.append(parsed_col)
    if not columns:
      raise DataTableException("Description iterable objects should not"
                               " be empty.")
    return columns
  # The other case is a dictionary
  if not table_description:
    raise DataTableException("Empty dictionaries are not allowed inside"
                             " description")

  # To differentiate between the two cases of more levels below or this is
  # the most inner dictionary, we consider the number of keys (more then one
  # key is indication for most inner dictionary) and the type of the key and
  # value in case of only 1 key (if the type of key is string and the type of
  # the value is a tuple of 0-3 items, we assume this is the most inner
  # dictionary).
  # NOTE: this way of differentiating might create ambiguity. See docs.
  if (len(table_description) != 1 or
      (isinstance(next(six.iterkeys(table_description)), six.string_types) and
       isinstance(next(six.itervalues(table_description)), tuple) and
       len(next(six.itervalues(table_description))) < 4)):
    # This is the most inner dictionary. Parsing types.
    columns = []
    # We sort the items, equivalent to sort the keys since they are unique
    for key, value in sorted(table_description.items()):
      # We parse the column type as (key, type) or (key, type, label) using
      # ColumnTypeParser.
      if isinstance(value, tuple):
        parsed_col = DataTable.ColumnTypeParser((key,) + value)
      else:
        parsed_col = DataTable.ColumnTypeParser((key, value))
      parsed_col["depth"] = depth
      parsed_col["container"] = "dict"
      columns.append(parsed_col)
    return columns
  # This is an outer dictionary, must have at most one key.
  # Recurse into the single value with an incremented depth.
  parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
  parsed_col["depth"] = depth
  parsed_col["container"] = "dict"
  return ([parsed_col] + DataTable.TableDescriptionParser(
      sorted(table_description.values())[0], depth=depth + 1))
@property
def columns(self):
  """Returns the parsed table description (flat list of column dicts)."""
  return self.__columns
def NumberOfRows(self):
  """Returns the number of rows in the current data stored in the table."""
  return len(self.__data)
def LoadData(self, data, custom_properties=None):
    """Replaces the table's rows with the given data.

    All previously stored rows are discarded, then the new rows are
    appended via AppendData(). If custom_properties is given, that
    string-to-string dictionary is attached to every appended row.

    Args:
      data: The rows that the table will contain; must conform to the
        table schema passed to __init__().
      custom_properties: Optional. A dictionary of string to string to set
        as the custom properties for all rows.
    """
    # Drop existing rows first, then delegate validation and insertion.
    self.__data = []
    self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
"""Appends new data to the table.
Data is appended in rows. Data must comply with
the table schema passed in to __init__(). See CoerceValue() for a list
of acceptable data types. See the class documentation for more information
and examples of schema and data values.
Args:
data: The row to add to the table. The data must conform to the table
description format.
custom_properties: A dictionary of string to string, representing the
custom properties to add to all the rows.
Raises:
DataTableException: The data structure does not match the description.
"""
# If the maximal depth is 0, we simply iterate over the data table
# lines and insert them using _InnerAppendData. Otherwise, we simply
# let the _InnerAppendData handle all the levels.
# ({}, custom_properties) seeds the (row_values, row_custom_properties)
# pair that _InnerAppendData fills in and stores in self.__data.
if not self.__columns[-1]["depth"]:
for row in data:
self._InnerAppendData(({}, custom_properties), row, 0)
else:
self._InnerAppendData(({}, custom_properties), data, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
"""Recursively fills rows from nested data. Internal helper for AppendData.

prev_col_values is a (row_values_dict, custom_properties) pair that is
populated as the recursion descends the column list; completed pairs
are stored in self.__data. col_index is the index of the column the
current level of *data* corresponds to.
"""
# We first check that col_index has not exceeded the columns size
if col_index >= len(self.__columns):
raise DataTableException("The data does not match description, too deep")
# Dealing with the scalar case, the data is the last value.
if self.__columns[col_index]["container"] == "scalar":
prev_col_values[0][self.__columns[col_index]["id"]] = data
self.__data.append(prev_col_values)
return
if self.__columns[col_index]["container"] == "iter":
if not hasattr(data, "__iter__") or isinstance(data, dict):
raise DataTableException("Expected iterable object, got %s" %
type(data))
# We only need to insert the rest of the columns
# If there are less items than expected, we only add what there is.
for value in data:
if col_index >= len(self.__columns):
raise DataTableException("Too many elements given in data")
prev_col_values[0][self.__columns[col_index]["id"]] = value
col_index += 1
self.__data.append(prev_col_values)
return
# We know the current level is a dictionary, we verify the type.
if not isinstance(data, dict):
raise DataTableException("Expected dictionary at current level, got %s" %
type(data))
# We check if this is the last level
if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
# We need to add the keys in the dictionary as they are
for col in self.__columns[col_index:]:
if col["id"] in data:
prev_col_values[0][col["id"]] = data[col["id"]]
self.__data.append(prev_col_values)
return
# We have a dictionary in an inner depth level.
if not data.keys():
# In case this is an empty dictionary, we add a record with the columns
# filled only until this point.
self.__data.append(prev_col_values)
else:
# Keys become the cell values of this column; sorting makes row
# order deterministic. A copy of the values dict is taken so each
# branch of the recursion builds its own row.
for key in sorted(data):
col_values = dict(prev_col_values[0])
col_values[self.__columns[col_index]["id"]] = key
self._InnerAppendData((col_values, prev_col_values[1]),
data[key], col_index + 1)
def _PreparedData(self, order_by=()):
"""Prepares the data for enumeration - sorting it by order_by.
Args:
order_by: Optional. Specifies the name of the column(s) to sort by, and
(optionally) which direction to sort in. Default sort direction
is asc. Following formats are accepted:
"string_col_name" -- For a single key in default (asc) order.
("string_col_name", "asc|desc") -- For a single key.
[("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
one column, an array of tuples of (col_name, "asc|desc").
Returns:
The data sorted by the keys given.
Raises:
DataTableException: Sort direction not in 'asc' or 'desc'
"""
if not order_by:
return self.__data
# Work on a copy so the table's own row order stays untouched.
sorted_data = self.__data[:]
if isinstance(order_by, six.string_types) or (
isinstance(order_by, tuple) and len(order_by) == 2 and
order_by[1].lower() in ["asc", "desc"]):
order_by = (order_by,)
# Sort by the least-significant key first; list.sort is stable, so
# successive sorts yield the combined multi-key ordering.
for key in reversed(order_by):
if isinstance(key, six.string_types):
sorted_data.sort(key=lambda x: x[0].get(key))
elif (isinstance(key, (list, tuple)) and len(key) == 2 and
key[1].lower() in ("asc", "desc")):
key_func = lambda x: x[0].get(key[0])
sorted_data.sort(key=key_func, reverse=key[1].lower() != "asc")
else:
raise DataTableException("Expected tuple with second value: "
"'asc' or 'desc'")
return sorted_data
def ToJSCode(self, name, columns_order=None, order_by=()):
"""Writes the data table as a JS code string.
This method writes a string of JS code that can be run to
generate a DataTable with the specified data. Typically used for debugging
only.
Args:
name: The name of the table. The name would be used as the DataTable's
variable name in the created JS code.
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData.
Returns:
A string of JS code that, when run, generates a DataTable with the given
name and the data stored in the DataTable object.
Example result:
"var tab1 = new google.visualization.DataTable();
tab1.addColumn("string", "a", "a");
tab1.addColumn("number", "b", "b");
tab1.addColumn("boolean", "c", "c");
tab1.addRows(10);
tab1.setCell(0, 0, "a");
tab1.setCell(0, 1, 1, null, {"foo": "bar"});
tab1.setCell(0, 2, true);
...
tab1.setCell(9, 0, "c");
tab1.setCell(9, 1, 3, "3$");
tab1.setCell(9, 2, false);"
Raises:
DataTableException: The data does not match the type.
"""
encoder = DataTableJSONEncoder()
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
# We first create the table with the given name
jscode = "var %s = new google.visualization.DataTable();\n" % name
if self.custom_properties:
jscode += "%s.setTableProperties(%s);\n" % (
name, encoder.encode(self.custom_properties))
# We add the columns to the table
for i, col in enumerate(columns_order):
jscode += "%s.addColumn(%s, %s, %s);\n" % (
name,
encoder.encode(col_dict[col]["type"]),
encoder.encode(col_dict[col]["label"]),
encoder.encode(col_dict[col]["id"]))
if col_dict[col]["custom_properties"]:
jscode += "%s.setColumnProperties(%d, %s);\n" % (
name, i, encoder.encode(col_dict[col]["custom_properties"]))
jscode += "%s.addRows(%d);\n" % (name, len(self.__data))
# We now go over the data and add each row
for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
# We add all the elements of this row by their order
for (j, col) in enumerate(columns_order):
if col not in row or row[col] is None:
continue
value = self.CoerceValue(row[col], col_dict[col]["type"])
# CoerceValue returns a tuple when the cell also carries a
# formatted value and/or custom properties.
if isinstance(value, tuple):
cell_cp = ""
if len(value) == 3:
cell_cp = ", %s" % encoder.encode(row[col][2])
# We have a formatted value or custom property as well
jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
(name, i, j,
self.EscapeForJSCode(encoder, value[0]),
self.EscapeForJSCode(encoder, value[1]), cell_cp))
else:
jscode += "%s.setCell(%d, %d, %s);\n" % (
name, i, j, self.EscapeForJSCode(encoder, value))
if cp:
jscode += "%s.setRowProperties(%d, %s);\n" % (
name, i, encoder.encode(cp))
return jscode
def ToHtml(self, columns_order=None, order_by=()):
"""Writes the data table as an HTML table code string.
Args:
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData.
Returns:
An HTML table code string.
Example result (the result is without the newlines):
<html><body><table border="1">
<thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
<tbody>
<tr><td>1</td><td>"z"</td><td>2</td></tr>
<tr><td>"3$"</td><td>"w"</td><td></td></tr>
</tbody>
</table></body></html>
Raises:
DataTableException: The data does not match the type.
"""
table_template = "<html><body><table border=\"1\">%s</table></body></html>"
columns_template = "<thead><tr>%s</tr></thead>"
rows_template = "<tbody>%s</tbody>"
row_template = "<tr>%s</tr>"
header_cell_template = "<th>%s</th>"
cell_template = "<td>%s</td>"
# All header and cell text goes through html.escape below, so user data
# cannot inject markup into the generated table.
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
columns_list = []
for col in columns_order:
columns_list.append(header_cell_template %
html.escape(col_dict[col]["label"]))
columns_html = columns_template % "".join(columns_list)
rows_list = []
# We now go over the data and add each row
for row, unused_cp in self._PreparedData(order_by):
cells_list = []
# We add all the elements of this row by their order
for col in columns_order:
# For empty string we want empty quotes ("").
value = ""
if col in row and row[col] is not None:
value = self.CoerceValue(row[col], col_dict[col]["type"])
if isinstance(value, tuple):
# We have a formatted value and we're going to use it
cells_list.append(cell_template % html.escape(self.ToString(value[1])))
else:
cells_list.append(cell_template % html.escape(self.ToString(value)))
rows_list.append(row_template % "".join(cells_list))
rows_html = rows_template % "".join(rows_list)
return table_template % (columns_html + rows_html)
def ToCsv(self, columns_order=None, order_by=(), separator=","):
"""Writes the data table as a CSV string.
Output is encoded in UTF-8 because the Python "csv" module can't handle
Unicode properly according to its documentation.
Args:
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData.
separator: Optional. The separator to use between the values.
Returns:
A CSV string representing the table.
Example result:
'a','b','c'
1,'z',2
3,'w',''
Raises:
DataTableException: The data does not match the type.
"""
csv_buffer = six.StringIO()
writer = csv.writer(csv_buffer, delimiter=separator)
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
# Non-str values (unicode on Python 2) are encoded to UTF-8 bytes;
# on Python 3 everything is already str and passes through unchanged.
def ensure_str(s):
"Compatibility function. Ensures using of str rather than unicode."
if isinstance(s, str):
return s
return s.encode("utf-8")
# Header row: the column labels.
writer.writerow([ensure_str(col_dict[col]["label"])
for col in columns_order])
# We now go over the data and add each row
for row, unused_cp in self._PreparedData(order_by):
cells_list = []
# We add all the elements of this row by their order
for col in columns_order:
value = ""
if col in row and row[col] is not None:
value = self.CoerceValue(row[col], col_dict[col]["type"])
if isinstance(value, tuple):
# We have a formatted value. Using it only for date/time types.
if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
cells_list.append(ensure_str(self.ToString(value[1])))
else:
cells_list.append(ensure_str(self.ToString(value[0])))
else:
cells_list.append(ensure_str(self.ToString(value)))
writer.writerow(cells_list)
return csv_buffer.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
    """Returns the table as a tab-separated file readable by MS Excel.

    The table is rendered via ToCsv() with a tab separator and then
    encoded as little-endian UTF-16, the encoding Excel expects.

    Args:
      columns_order: Delegated to ToCsv.
      order_by: Delegated to ToCsv.

    Returns:
      A tab-separated little endian UTF16 file representing the table.
    """
    tsv = self.ToCsv(columns_order, order_by, separator="\t")
    if isinstance(tsv, bytes):
        # Normalize any byte output to text before re-encoding.
        tsv = tsv.decode("utf-8")
    return tsv.encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
"""Returns an object suitable to be converted to JSON.
Args:
columns_order: Optional. A list of all column IDs in the order in which
you want them created in the output table. If specified,
all column IDs must be present.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData().
Returns:
A dictionary object for use by ToJSon or ToJSonResponse.
"""
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
# Creating the column JSON objects
col_objs = []
for col_id in columns_order:
col_obj = {"id": col_dict[col_id]["id"],
"label": col_dict[col_id]["label"],
"type": col_dict[col_id]["type"]}
if col_dict[col_id]["custom_properties"]:
col_obj["p"] = col_dict[col_id]["custom_properties"]
col_objs.append(col_obj)
# Creating the rows jsons
# Cell objects follow the wire format: "v" = value, "f" = formatted
# value, "p" = custom properties; a wholly-empty cell becomes null.
row_objs = []
for row, cp in self._PreparedData(order_by):
cell_objs = []
for col in columns_order:
value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
if value is None:
cell_obj = None
elif isinstance(value, tuple):
cell_obj = {"v": value[0]}
if len(value) > 1 and value[1] is not None:
cell_obj["f"] = value[1]
if len(value) == 3:
cell_obj["p"] = value[2]
else:
cell_obj = {"v": value}
cell_objs.append(cell_obj)
row_obj = {"c": cell_objs}
if cp:
row_obj["p"] = cp
row_objs.append(row_obj)
json_obj = {"cols": col_objs, "rows": row_objs}
if self.custom_properties:
json_obj["p"] = self.custom_properties
return json_obj
def ToJSon(self, columns_order=None, order_by=()):
    """Returns a JSON string usable by the JS DataTable constructor.

    The string can be passed directly into a
    google.visualization.DataTable constructor on the page hosting the
    visualization, e.g.:
      var data = new google.visualization.DataTable(json_string, 0.6);

    Args:
      columns_order: Optional. Specifies the order of columns in the
        output table; if given, it must list every column ID.
      order_by: Optional. Specifies the name of the column(s) to sort by.
        Passed as is to _PreparedData().

    Returns:
      A JSON constructor string describing the table's columns, rows and
      custom properties, e.g. (without newlines):
        {cols: [{id:"a",label:"a",type:"number"}, ...],
         rows: [{c:[{v:1},{v:"z"}]}, ...], p: {'foo': 'bar'}}

    Raises:
      DataTableException: The data does not match the type.
    """
    json_str = DataTableJSONEncoder().encode(
        self._ToJSonObj(columns_order, order_by))
    if isinstance(json_str, str):
        return json_str
    # Non-str results (e.g. unicode on Python 2) are encoded to UTF-8.
    return json_str.encode("utf-8")
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
response_handler="google.visualization.Query.setResponse"):
"""Writes a table as a JSON response that can be returned as-is to a client.
This method writes a JSON response to return to a client in response to a
Google Visualization API query. This string can be processed by the calling
page, and is used to deliver a data table to a visualization hosted on
a different page.
Args:
columns_order: Optional. Passed straight to self.ToJSon().
order_by: Optional. Passed straight to self.ToJSon().
req_id: Optional. The response id, as retrieved by the request.
response_handler: Optional. The response handler, as retrieved by the
request.
Returns:
A JSON response string to be received by JS the visualization Query
object. This response would be translated into a DataTable on the
client side.
Example result (newlines added for readability):
google.visualization.Query.setResponse({
'version':'0.6', 'reqId':'0', 'status':'OK',
'table': {cols: [...], rows: [...]}});
Note: The URL returning this string can be used as a data source by Google
Visualization Gadgets or from JS code.
"""
# "0.6" is the wire-protocol version this implementation produces.
response_obj = {
"version": "0.6",
"reqId": str(req_id),
"table": self._ToJSonObj(columns_order, order_by),
"status": "ok"
}
encoded_response_str = DataTableJSONEncoder().encode(response_obj)
if not isinstance(encoded_response_str, str):
encoded_response_str = encoded_response_str.encode("utf-8")
# Wrap in a call to the client-side handler: handler({...});
return "%s(%s);" % (response_handler, encoded_response_str)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
    """Renders the table in the format requested by a 'tqx' string.

    The tqx request string (defined by the documentation for implementing
    a Google Visualization data source) is parsed as
    "key1:value1;key2:value2...". The "out" key selects the output
    format -- ToJSonResponse() for "json" (the default), ToHtml() for
    "html", ToCsv() for "csv" and ToTsvExcel() for "tsv-excel" -- and the
    remaining relevant keys are forwarded to that response function.

    Args:
      columns_order: Optional. Passed as is to the response function.
      order_by: Optional. Passed as is to the response function.
      tqx: Optional. The request string as received by HTTP GET. Every
        key has a default value, so "" yields the default JSON response.

    Returns:
      A response string, as returned by the relevant response function.

    Raises:
      DataTableException: One of the parameters passed in tqx is not
        supported.
    """
    options = {}
    if tqx:
        options = dict(pair.split(":") for pair in tqx.split(";"))
    # Only protocol version 0.6 is implemented.
    if options.get("version", "0.6") != "0.6":
        raise DataTableException(
            "Version (%s) passed by request is not supported."
            % options["version"])
    out = options.get("out", "json")
    if out == "json":
        handler = options.get("responseHandler",
                              "google.visualization.Query.setResponse")
        return self.ToJSonResponse(columns_order, order_by,
                                   req_id=options.get("reqId", 0),
                                   response_handler=handler)
    if out == "html":
        return self.ToHtml(columns_order, order_by)
    if out == "csv":
        return self.ToCsv(columns_order, order_by)
    if out == "tsv-excel":
        return self.ToTsvExcel(columns_order, order_by)
    raise DataTableException(
        "'out' parameter: '%s' is not supported" % out)
|
google/google-visualization-python
|
gviz_api.py
|
DataTable.LoadData
|
python
|
def LoadData(self, data, custom_properties=None):
self.__data = []
self.AppendData(data, custom_properties)
|
Loads new rows to the data table, clearing existing rows.
May also set the custom_properties for the added rows. The given custom
properties dictionary specifies the dictionary that will be used for *all*
given rows.
Args:
data: The rows that the table will contain.
custom_properties: A dictionary of string to string to set as the custom
properties for all rows.
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L552-L565
|
[
"def AppendData(self, data, custom_properties=None):\n \"\"\"Appends new data to the table.\n\n Data is appended in rows. Data must comply with\n the table schema passed in to __init__(). See CoerceValue() for a list\n of acceptable data types. See the class documentation for more information\n and examples of schema and data values.\n\n Args:\n data: The row to add to the table. The data must conform to the table\n description format.\n custom_properties: A dictionary of string to string, representing the\n custom properties to add to all the rows.\n\n Raises:\n DataTableException: The data structure does not match the description.\n \"\"\"\n # If the maximal depth is 0, we simply iterate over the data table\n # lines and insert them using _InnerAppendData. Otherwise, we simply\n # let the _InnerAppendData handle all the levels.\n if not self.__columns[-1][\"depth\"]:\n for row in data:\n self._InnerAppendData(({}, custom_properties), row, 0)\n else:\n self._InnerAppendData(({}, custom_properties), data, 0)\n"
] |
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
"""Initialize the data table from a table schema and (optionally) data.
See the class documentation for more information on table schema and data
values.
Args:
table_description: A table schema, following one of the formats described
in TableDescriptionParser(). Schemas describe the
column names, data types, and labels. See
TableDescriptionParser() for acceptable formats.
data: Optional. If given, fills the table with the given data. The data
structure must be consistent with schema in table_description. See
the class documentation for more information on acceptable data. You
can add data later by calling AppendData().
custom_properties: Optional. A dictionary from string to string that
goes into the table's custom properties. This can be
later changed by changing self.custom_properties.
Raises:
DataTableException: Raised if the data and the description did not match,
or did not use the supported formats.
"""
# __columns holds the flat list of parsed column dicts; __data holds
# (row_values, custom_properties) pairs filled by LoadData/AppendData.
self.__columns = self.TableDescriptionParser(table_description)
self.__data = []
self.custom_properties = {}
if custom_properties is not None:
self.custom_properties = custom_properties
if data:
self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
"""Coerces a single value into the type expected for its column.
Internal helper method.
Args:
value: The value which should be converted
value_type: One of "string", "number", "boolean", "date", "datetime" or
"timeofday".
Returns:
An item of the Python type appropriate to the given value_type. Strings
are also converted to Unicode using UTF-8 encoding if necessary.
If a tuple is given, it should be in one of the following forms:
- (value, formatted value)
- (value, formatted value, custom properties)
where the formatted value is a string, and custom properties is a
dictionary of the custom properties for this cell.
To specify custom properties without specifying formatted value, one can
pass None as the formatted value.
One can also have a null-valued cell with formatted value and/or custom
properties by specifying None for the value.
This method ignores the custom properties except for checking that it is a
dictionary. The custom properties are handled in the ToJSon and ToJSCode
methods.
The real type of the given value is not strictly checked. For example,
any type can be used for string - as we simply take its str( ) and for
boolean value we just check "if value".
Examples:
CoerceValue(None, "string") returns None
CoerceValue((5, "5$"), "number") returns (5, "5$")
CoerceValue(100, "string") returns "100"
CoerceValue(0, "boolean") returns False
Raises:
DataTableException: The value and type did not match in a not-recoverable
way, for example given value 'abc' for type 'number'.
"""
if isinstance(value, tuple):
# In case of a tuple, we run the same function on the value itself and
# add the formatted value.
if (len(value) not in [2, 3] or
(len(value) == 3 and not isinstance(value[2], dict))):
raise DataTableException("Wrong format for value and formatting - %s." %
str(value))
if not isinstance(value[1], six.string_types + (type(None),)):
raise DataTableException("Formatted value is not string, given %s." %
type(value[1]))
js_value = DataTable.CoerceValue(value[0], value_type)
return (js_value,) + value[1:]
# Remember the original type for the error messages below.
t_value = type(value)
if value is None:
return value
if value_type == "boolean":
return bool(value)
elif value_type == "number":
if isinstance(value, six.integer_types + (float,)):
return value
raise DataTableException("Wrong type %s when expected number" % t_value)
elif value_type == "string":
if isinstance(value, six.text_type):
return value
if isinstance(value, bytes):
return six.text_type(value, encoding="utf-8")
else:
return six.text_type(value)
elif value_type == "date":
# datetime.datetime is a subclass of datetime.date, so check it first.
if isinstance(value, datetime.datetime):
return datetime.date(value.year, value.month, value.day)
elif isinstance(value, datetime.date):
return value
else:
raise DataTableException("Wrong type %s when expected date" % t_value)
elif value_type == "timeofday":
if isinstance(value, datetime.datetime):
return datetime.time(value.hour, value.minute, value.second)
elif isinstance(value, datetime.time):
return value
else:
raise DataTableException("Wrong type %s when expected time" % t_value)
elif value_type == "datetime":
if isinstance(value, datetime.datetime):
return value
else:
raise DataTableException("Wrong type %s when expected datetime" %
t_value)
# If we got here, it means the given value_type was not one of the
# supported types.
raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
"""Returns a display string for a cell value.

None renders as "(empty)", date/time values via str(), booleans as
lowercase "true"/"false", and bytes are decoded as UTF-8 text.
"""
if value is None:
return "(empty)"
elif isinstance(value, (datetime.datetime,
datetime.date,
datetime.time)):
return str(value)
elif isinstance(value, six.text_type):
return value
elif isinstance(value, bool):
return str(value).lower()
elif isinstance(value, bytes):
return six.text_type(value, encoding="utf-8")
else:
return six.text_type(value)
@staticmethod
def ColumnTypeParser(description):
"""Parses a single column description. Internal helper method.
Args:
description: a column description in the possible formats:
'id'
('id',)
('id', 'type')
('id', 'type', 'label')
('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
Returns:
Dictionary with the following keys: id, label, type, and
custom_properties where:
- If label not given, it equals the id.
- If type not given, string is used by default.
- If custom properties are not given, an empty dictionary is used by
default.
Raises:
DataTableException: The column description did not match the RE, or
unsupported type was passed.
"""
if not description:
raise DataTableException("Description error: empty description given")
if not isinstance(description, (six.string_types, tuple)):
raise DataTableException("Description error: expected either string or "
"tuple, got %s." % type(description))
# A bare string is shorthand for the 1-tuple (id,).
if isinstance(description, six.string_types):
description = (description,)
# According to the tuple's length, we fill the keys
# We verify everything is of type string
for elem in description[:3]:
if not isinstance(elem, six.string_types):
raise DataTableException("Description error: expected tuple of "
"strings, current element of type %s." %
type(elem))
desc_dict = {"id": description[0],
"label": description[0],
"type": "string",
"custom_properties": {}}
if len(description) > 1:
desc_dict["type"] = description[1].lower()
if len(description) > 2:
desc_dict["label"] = description[2]
if len(description) > 3:
if not isinstance(description[3], dict):
raise DataTableException("Description error: expected custom "
"properties of type dict, current element "
"of type %s." % type(description[3]))
desc_dict["custom_properties"] = description[3]
if len(description) > 4:
raise DataTableException("Description error: tuple of length > 4")
if desc_dict["type"] not in ["string", "number", "boolean",
"date", "datetime", "timeofday"]:
raise DataTableException(
"Description error: unsupported type '%s'" % desc_dict["type"])
return desc_dict
@staticmethod
def TableDescriptionParser(table_description, depth=0):
  """Parses the table_description object for internal use.

  Parses the user-submitted table description into an internal format used
  by the Python DataTable class. Returns the flat list of parsed columns.

  Args:
    table_description: A description of the table which should comply
        with one of the formats described below.
    depth: Optional. The depth of the first level in the current description.
        Used by recursive calls to this function.

  Returns:
    List of columns, where each column represented by a dictionary with the
    keys: id, label, type, depth, container which means the following:
    - id: the id of the column
    - name: The name of the column
    - type: The datatype of the elements in this column. Allowed types are
      described in ColumnTypeParser().
    - depth: The depth of this column in the table description
    - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
    - custom_properties: The custom properties for this column.
    The returned description is flattened regardless of how it was given.

  Raises:
    DataTableException: Error in a column description or in the description
        structure.

  Examples:
    A column description can be of the following forms:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
     or as a dictionary:
     'id': 'type'
     'id': ('type',)
     'id': ('type', 'label')
     'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
    If the type is not specified, we treat it as string.
    If no specific label is given, the label is simply the id.
    If no custom properties are given, we use an empty dictionary.

    input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
    output: [{'id': 'a', 'label': 'a', 'type': 'date',
              'depth': 0, 'container': 'iter', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'timeofday',
              'depth': 0, 'container': 'iter',
              'custom_properties': {'foo': 'bar'}}]

    input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
    output: [{'id': 'a', 'label': 'a', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'iter', 'custom_properties': {}},
             {'id': 'c', 'label': 'column c', 'type': 'string',
              'depth': 1, 'container': 'iter', 'custom_properties': {}}]

    input:  {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'c', 'type': 'string',
              'depth': 1, 'container': 'dict', 'custom_properties': {}}]

    input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
    output: [{'id': 'w', 'label': 'word', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'count', 'type': 'number',
              'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

    input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
             'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
             'container': 'dict', 'custom_properties': {}}

    NOTE: there might be ambiguity in the case of a dictionary representation
    of a single column. For example, the following description can be parsed
    in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
    with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
    'a', and the other named 'b' of type 'c'. We choose the first option by
    default, and in case the second option is the right one, it is possible to
    make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
    into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
    -- second 'b' is the label, and {} is the custom properties field.
  """
  # For the recursion step, we check for a scalar object (string or tuple)
  if isinstance(table_description, (six.string_types, tuple)):
    parsed_col = DataTable.ColumnTypeParser(table_description)
    parsed_col["depth"] = depth
    parsed_col["container"] = "scalar"
    return [parsed_col]

  # Since it is not scalar, table_description must be iterable.
  if not hasattr(table_description, "__iter__"):
    raise DataTableException("Expected an iterable object, got %s" %
                             type(table_description))
  if not isinstance(table_description, dict):
    # We expects a non-dictionary iterable item. Each element describes one
    # column at the current depth.
    columns = []
    for desc in table_description:
      parsed_col = DataTable.ColumnTypeParser(desc)
      parsed_col["depth"] = depth
      parsed_col["container"] = "iter"
      columns.append(parsed_col)
    if not columns:
      raise DataTableException("Description iterable objects should not"
                               " be empty.")
    return columns
  # The other case is a dictionary
  if not table_description:
    raise DataTableException("Empty dictionaries are not allowed inside"
                             " description")

  # To differentiate between the two cases of more levels below or this is
  # the most inner dictionary, we consider the number of keys (more then one
  # key is indication for most inner dictionary) and the type of the key and
  # value in case of only 1 key (if the type of key is string and the type of
  # the value is a tuple of 0-3 items, we assume this is the most inner
  # dictionary).
  # NOTE: this way of differentiating might create ambiguity. See docs.
  if (len(table_description) != 1 or
      (isinstance(next(six.iterkeys(table_description)), six.string_types) and
       isinstance(next(six.itervalues(table_description)), tuple) and
       len(next(six.itervalues(table_description))) < 4)):
    # This is the most inner dictionary. Parsing types.
    columns = []
    # We sort the items, equivalent to sort the keys since they are unique
    for key, value in sorted(table_description.items()):
      # We parse the column type as (key, type) or (key, type, label) using
      # ColumnTypeParser.
      if isinstance(value, tuple):
        parsed_col = DataTable.ColumnTypeParser((key,) + value)
      else:
        parsed_col = DataTable.ColumnTypeParser((key, value))
      parsed_col["depth"] = depth
      parsed_col["container"] = "dict"
      columns.append(parsed_col)
    return columns
  # This is an outer dictionary, must have at most one key. Its single value
  # holds the columns of the next depth level, parsed recursively.
  parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
  parsed_col["depth"] = depth
  parsed_col["container"] = "dict"
  return ([parsed_col] + DataTable.TableDescriptionParser(
      sorted(table_description.values())[0], depth=depth + 1))
@property
def columns(self):
  """The parsed, flattened table description (list of column dicts)."""
  parsed_schema = self.__columns
  return parsed_schema
def NumberOfRows(self):
  """Returns how many rows are currently stored in the table."""
  row_count = len(self.__data)
  return row_count
def SetRowsCustomProperties(self, rows, custom_properties):
  """Sets the custom properties for given row(s).

  Accepts either a single row index or an iterable of row indices; the same
  custom-properties dictionary is attached to every listed row.

  Args:
    rows: The row, or rows, to set the custom properties for.
    custom_properties: A string to string dictionary of custom properties to
        set for all rows.
  """
  # Normalize a single index into a one-element list.
  row_indices = rows if hasattr(rows, "__iter__") else [rows]
  for index in row_indices:
    # Rows are stored as (cell-values dict, custom properties) pairs; only
    # the second slot is replaced here.
    existing_cells = self.__data[index][0]
    self.__data[index] = (existing_cells, custom_properties)
def AppendData(self, data, custom_properties=None):
  """Appends new data to the table.

  Data is appended row by row and must match the schema passed to
  __init__(); see the class documentation for examples of acceptable data
  shapes and CoerceValue() for the supported cell types.

  Args:
    data: The row(s) to add. The data must conform to the table
        description format.
    custom_properties: A dictionary of string to string, representing the
        custom properties to add to all the rows.

  Raises:
    DataTableException: The data structure does not match the description.
  """
  # A non-zero depth on the last parsed column means the description is
  # nested, so the whole structure goes to _InnerAppendData in one call.
  if self.__columns[-1]["depth"]:
    self._InnerAppendData(({}, custom_properties), data, 0)
    return
  # Flat description: each element of data is one row. A fresh accumulator
  # tuple is built per row so rows never share a cell dictionary.
  for single_row in data:
    self._InnerAppendData(({}, custom_properties), single_row, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
  """Inner function to assist LoadData.

  Recursively walks `data` in step with the parsed column description,
  accumulating cell values into `prev_col_values` and appending completed
  rows to self.__data.

  Args:
    prev_col_values: Tuple of (dict mapping column id -> value filled so
        far, custom properties for the row or None).
    data: The remaining (possibly nested) data for the columns from
        col_index onwards.
    col_index: Index into self.__columns of the next column to fill.

  Raises:
    DataTableException: The data structure does not match the description.
  """
  # We first check that col_index has not exceeded the columns size
  if col_index >= len(self.__columns):
    raise DataTableException("The data does not match description, too deep")

  # Dealing with the scalar case, the data is the last value.
  if self.__columns[col_index]["container"] == "scalar":
    prev_col_values[0][self.__columns[col_index]["id"]] = data
    self.__data.append(prev_col_values)
    return

  if self.__columns[col_index]["container"] == "iter":
    # NOTE(review): strings also have __iter__ on Python 3; presumably
    # callers never pass a bare string at an 'iter' level -- confirm.
    if not hasattr(data, "__iter__") or isinstance(data, dict):
      raise DataTableException("Expected iterable object, got %s" %
                               type(data))
    # We only need to insert the rest of the columns
    # If there are less items than expected, we only add what there is.
    for value in data:
      if col_index >= len(self.__columns):
        raise DataTableException("Too many elements given in data")
      prev_col_values[0][self.__columns[col_index]["id"]] = value
      col_index += 1
    self.__data.append(prev_col_values)
    return

  # We know the current level is a dictionary, we verify the type.
  if not isinstance(data, dict):
    raise DataTableException("Expected dictionary at current level, got %s" %
                             type(data))
  # We check if this is the last level
  if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
    # We need to add the keys in the dictionary as they are
    for col in self.__columns[col_index:]:
      if col["id"] in data:
        prev_col_values[0][col["id"]] = data[col["id"]]
    self.__data.append(prev_col_values)
    return

  # We have a dictionary in an inner depth level.
  if not data.keys():
    # In case this is an empty dictionary, we add a record with the columns
    # filled only until this point.
    self.__data.append(prev_col_values)
  else:
    for key in sorted(data):
      # Copy the accumulated values so sibling keys do not mutate the same
      # dict through the shared prev_col_values reference.
      col_values = dict(prev_col_values[0])
      col_values[self.__columns[col_index]["id"]] = key
      self._InnerAppendData((col_values, prev_col_values[1]),
                            data[key], col_index + 1)
def _PreparedData(self, order_by=()):
  """Prepares the data for enumeration - sorting it by order_by.

  Args:
    order_by: Optional. Specifies the name of the column(s) to sort by, and
        (optionally) which direction to sort in. Default sort direction
        is asc. Following formats are accepted:
        "string_col_name" -- For a single key in default (asc) order.
        ("string_col_name", "asc|desc") -- For a single key.
        [("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
        one column, an array of tuples of (col_name, "asc|desc").

  Returns:
    The data sorted by the keys given.

  Raises:
    DataTableException: Sort direction not in 'asc' or 'desc'
  """
  if not order_by:
    # No sorting requested: the internal list is returned as-is (callers
    # only iterate it, they do not mutate it).
    return self.__data

  # Sort a shallow copy so self.__data keeps its insertion order.
  sorted_data = self.__data[:]
  if isinstance(order_by, six.string_types) or (
      isinstance(order_by, tuple) and len(order_by) == 2 and
      order_by[1].lower() in ["asc", "desc"]):
    # Normalize both single-key forms into a one-element sequence of keys.
    order_by = (order_by,)
  # Python's sort is stable, so sorting once per key from the least
  # significant to the most significant yields a multi-key sort.
  for key in reversed(order_by):
    if isinstance(key, six.string_types):
      sorted_data.sort(key=lambda x: x[0].get(key))
    elif (isinstance(key, (list, tuple)) and len(key) == 2 and
          key[1].lower() in ("asc", "desc")):
      # The lambda closes over the loop variable, but the sort runs in the
      # same iteration, so late binding is not an issue here.
      key_func = lambda x: x[0].get(key[0])
      sorted_data.sort(key=key_func, reverse=key[1].lower() != "asc")
    else:
      raise DataTableException("Expected tuple with second value: "
                               "'asc' or 'desc'")
  return sorted_data
def ToJSCode(self, name, columns_order=None, order_by=()):
  """Writes the data table as a JS code string.

  This method writes a string of JS code that can be run to
  generate a DataTable with the specified data. Typically used for debugging
  only.

  Args:
    name: The name of the table. The name would be used as the DataTable's
        variable name in the created JS code.
    columns_order: Optional. Specifies the order of columns in the
        output table. Specify a list of all column IDs in the order
        in which you want the table created.
        Note that you must list all column IDs in this parameter,
        if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
        Passed as is to _PreparedData.

  Returns:
    A string of JS code that, when run, generates a DataTable with the given
    name and the data stored in the DataTable object.

  Example result:
    "var tab1 = new google.visualization.DataTable();
     tab1.addColumn("string", "a", "a");
     tab1.addColumn("number", "b", "b");
     tab1.addColumn("boolean", "c", "c");
     tab1.addRows(10);
     tab1.setCell(0, 0, "a");
     tab1.setCell(0, 1, 1, null, {"foo": "bar"});
     tab1.setCell(0, 2, true);
     ...
     tab1.setCell(9, 0, "c");
     tab1.setCell(9, 1, 3, "3$");
     tab1.setCell(9, 2, false);"

  Raises:
    DataTableException: The data does not match the type.
  """
  encoder = DataTableJSONEncoder()

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # We first create the table with the given name
  jscode = "var %s = new google.visualization.DataTable();\n" % name
  if self.custom_properties:
    jscode += "%s.setTableProperties(%s);\n" % (
        name, encoder.encode(self.custom_properties))

  # We add the columns to the table
  for i, col in enumerate(columns_order):
    jscode += "%s.addColumn(%s, %s, %s);\n" % (
        name,
        encoder.encode(col_dict[col]["type"]),
        encoder.encode(col_dict[col]["label"]),
        encoder.encode(col_dict[col]["id"]))
    if col_dict[col]["custom_properties"]:
      jscode += "%s.setColumnProperties(%d, %s);\n" % (
          name, i, encoder.encode(col_dict[col]["custom_properties"]))
  jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

  # We now go over the data and add each row
  for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
    # We add all the elements of this row by their order
    for (j, col) in enumerate(columns_order):
      # Missing or None cells are skipped; addRows() already created them
      # as null cells on the JS side.
      if col not in row or row[col] is None:
        continue
      value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        cell_cp = ""
        if len(value) == 3:
          cell_cp = ", %s" % encoder.encode(row[col][2])
        # We have a formatted value or custom property as well
        jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                   (name, i, j,
                    self.EscapeForJSCode(encoder, value[0]),
                    self.EscapeForJSCode(encoder, value[1]), cell_cp))
      else:
        jscode += "%s.setCell(%d, %d, %s);\n" % (
            name, i, j, self.EscapeForJSCode(encoder, value))
    if cp:
      jscode += "%s.setRowProperties(%d, %s);\n" % (
          name, i, encoder.encode(cp))
  return jscode
def ToHtml(self, columns_order=None, order_by=()):
  """Writes the data table as an HTML table code string.

  Args:
    columns_order: Optional. Specifies the order of columns in the
        output table. Specify a list of all column IDs in the order
        in which you want the table created.
        Note that you must list all column IDs in this parameter,
        if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
        Passed as is to _PreparedData.

  Returns:
    An HTML table code string.
    Example result (the result is without the newlines):
     <html><body><table border="1">
      <thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
      <tbody>
       <tr><td>1</td><td>"z"</td><td>2</td></tr>
       <tr><td>"3$"</td><td>"w"</td><td></td></tr>
      </tbody>
     </table></body></html>

  Raises:
    DataTableException: The data does not match the type.
  """
  table_template = "<html><body><table border=\"1\">%s</table></body></html>"
  columns_template = "<thead><tr>%s</tr></thead>"
  rows_template = "<tbody>%s</tbody>"
  row_template = "<tr>%s</tr>"
  header_cell_template = "<th>%s</th>"
  cell_template = "<td>%s</td>"

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  columns_list = []
  for col in columns_order:
    # Labels are HTML-escaped so the generated markup stays well-formed.
    columns_list.append(header_cell_template %
                        html.escape(col_dict[col]["label"]))
  columns_html = columns_template % "".join(columns_list)

  rows_list = []
  # We now go over the data and add each row
  for row, unused_cp in self._PreparedData(order_by):
    cells_list = []
    # We add all the elements of this row by their order
    for col in columns_order:
      # For empty string we want empty quotes ("").
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # We have a formatted value and we're going to use it
        cells_list.append(cell_template % html.escape(self.ToString(value[1])))
      else:
        cells_list.append(cell_template % html.escape(self.ToString(value)))
    rows_list.append(row_template % "".join(cells_list))
  rows_html = rows_template % "".join(rows_list)

  return table_template % (columns_html + rows_html)
def ToCsv(self, columns_order=None, order_by=(), separator=","):
  """Writes the data table as a CSV string.

  Output is encoded in UTF-8 because the Python "csv" module can't handle
  Unicode properly according to its documentation.

  Args:
    columns_order: Optional. Specifies the order of columns in the
        output table. Specify a list of all column IDs in the order
        in which you want the table created.
        Note that you must list all column IDs in this parameter,
        if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
        Passed as is to _PreparedData.
    separator: Optional. The separator to use between the values.

  Returns:
    A CSV string representing the table.
    Example result:
     'a','b','c'
     1,'z',2
     3,'w',''

  Raises:
    DataTableException: The data does not match the type.
  """
  csv_buffer = six.StringIO()
  writer = csv.writer(csv_buffer, delimiter=separator)

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  def ensure_str(s):
    "Compatibility function. Ensures using of str rather than unicode."
    # NOTE(review): Python 2 leftover -- on Python 3 these values should
    # already be str, so the encode branch looks unreachable; confirm.
    if isinstance(s, str):
      return s
    return s.encode("utf-8")

  # Header row first, built from the column labels.
  writer.writerow([ensure_str(col_dict[col]["label"])
                   for col in columns_order])

  # We now go over the data and add each row
  for row, unused_cp in self._PreparedData(order_by):
    cells_list = []
    # We add all the elements of this row by their order
    for col in columns_order:
      # Missing/None cells are emitted as the empty string.
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # We have a formatted value. Using it only for date/time types.
        if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
          cells_list.append(ensure_str(self.ToString(value[1])))
        else:
          cells_list.append(ensure_str(self.ToString(value[0])))
      else:
        cells_list.append(ensure_str(self.ToString(value)))
    writer.writerow(cells_list)
  return csv_buffer.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
  """Returns the table as tab-separated values readable by MS Excel.

  Delegates to ToCsv() with a tab separator, then re-encodes the result as
  little-endian UTF-16, which Excel opens correctly.

  Args:
    columns_order: Delegated to ToCsv.
    order_by: Delegated to ToCsv.

  Returns:
    A tab-separated little endian UTF16 file representing the table.
  """
  tab_separated = self.ToCsv(columns_order, order_by, separator="\t")
  # ToCsv may hand back bytes on legacy code paths; normalize to text first.
  if isinstance(tab_separated, six.text_type):
    text = tab_separated
  else:
    text = tab_separated.decode("utf-8")
  return text.encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
  """Returns an object suitable to be converted to JSON.

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
        you want them created in the output table. If specified,
        all column IDs must be present.
    order_by: Optional. Specifies the name of the column(s) to sort by.
        Passed as is to _PreparedData().

  Returns:
    A dictionary object for use by ToJSon or ToJSonResponse.
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = {col["id"]: col for col in self.__columns}

  def describe_column(col_id):
    # One JSON column descriptor; "p" is added only when the column has
    # custom properties.
    spec = col_dict[col_id]
    descriptor = {"id": spec["id"],
                  "label": spec["label"],
                  "type": spec["type"]}
    if spec["custom_properties"]:
      descriptor["p"] = spec["custom_properties"]
    return descriptor

  def encode_cell(coerced):
    # None -> null cell; a tuple carries (value, formatted[, properties]).
    if coerced is None:
      return None
    if not isinstance(coerced, tuple):
      return {"v": coerced}
    cell = {"v": coerced[0]}
    if len(coerced) > 1 and coerced[1] is not None:
      cell["f"] = coerced[1]
    if len(coerced) == 3:
      cell["p"] = coerced[2]
    return cell

  col_objs = [describe_column(col_id) for col_id in columns_order]

  row_objs = []
  for row, row_properties in self._PreparedData(order_by):
    cells = [encode_cell(self.CoerceValue(row.get(col, None),
                                          col_dict[col]["type"]))
             for col in columns_order]
    row_obj = {"c": cells}
    if row_properties:
      row_obj["p"] = row_properties
    row_objs.append(row_obj)

  json_obj = {"cols": col_objs, "rows": row_objs}
  if self.custom_properties:
    json_obj["p"] = self.custom_properties
  return json_obj
def ToJSon(self, columns_order=None, order_by=()):
  """Returns a JSON string usable by the JS DataTable constructor.

  The returned string can be passed directly as the first argument of the
  google.visualization.DataTable constructor (wire version 0.6):

    var data = new google.visualization.DataTable(<this string>, 0.6);

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
        they should appear in the output table. If given, every
        column ID must be listed.
    order_by: Optional. Name of the column(s) to sort by; forwarded to
        _PreparedData().

  Returns:
    A JSON constructor string describing the columns, rows and custom
    properties stored in this DataTable object.

  Raises:
    DataTableException: The data does not match the declared column types.
  """
  json_obj = self._ToJSonObj(columns_order, order_by)
  encoded = DataTableJSONEncoder().encode(json_obj)
  if isinstance(encoded, str):
    return encoded
  # Legacy (non-str) encoder output is normalized to UTF-8 bytes, matching
  # the historical behavior of this method.
  return encoded.encode("utf-8")
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
  """Writes a complete JSON response ready to return to a query client.

  Wraps the table (as produced by _ToJSonObj) in the version-0.6 response
  envelope and in a call to the client-side response handler, so the result
  can be returned verbatim in answer to a google.visualization.Query
  request hosted on a different page.

  Args:
    columns_order: Optional. Passed straight to _ToJSonObj().
    order_by: Optional. Passed straight to _ToJSonObj().
    req_id: Optional. The response id, as retrieved by the request.
    response_handler: Optional. The response handler, as retrieved by the
        request.

  Returns:
    A string of the form
    "google.visualization.Query.setResponse({...});" which the JS Query
    object translates back into a DataTable on the client side.
  """
  payload = {
      "version": "0.6",
      "status": "ok",
      "reqId": str(req_id),
      "table": self._ToJSonObj(columns_order, order_by),
  }
  encoded = DataTableJSONEncoder().encode(payload)
  if not isinstance(encoded, str):
    encoded = encoded.encode("utf-8")
  return "%s(%s);" % (response_handler, encoded)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
  """Dispatches to the right serializer according to the tqx request string.

  Parses tqx ("key1:value1;key2:value2...", as defined by the Google
  Visualization data source protocol) and routes to ToJSonResponse() for
  "json" (the default), ToHtml() for "html", ToCsv() for "csv" and
  ToTsvExcel() for "tsv-excel", forwarding the relevant request keys.

  Args:
    columns_order: Optional. Passed as is to the chosen serializer.
    order_by: Optional. Passed as is to the chosen serializer.
    tqx: Optional. The request string as received by HTTP GET; an empty
        string selects all defaults (a plain JSON response).

  Returns:
    A response string, as returned by the chosen serializer.

  Raises:
    DataTableException: One of the parameters passed in tqx is not supported.
  """
  request = {}
  if tqx:
    request = dict(option.split(":") for option in tqx.split(";"))
  # Only wire-protocol version 0.6 is supported.
  if request.get("version", "0.6") != "0.6":
    raise DataTableException(
        "Version (%s) passed by request is not supported."
        % request["version"])
  out_format = request.get("out", "json")
  if out_format == "json":
    handler = request.get("responseHandler",
                          "google.visualization.Query.setResponse")
    return self.ToJSonResponse(columns_order, order_by,
                               req_id=request.get("reqId", 0),
                               response_handler=handler)
  if out_format == "html":
    return self.ToHtml(columns_order, order_by)
  if out_format == "csv":
    return self.ToCsv(columns_order, order_by)
  if out_format == "tsv-excel":
    return self.ToTsvExcel(columns_order, order_by)
  raise DataTableException(
      "'out' parameter: '%s' is not supported" % out_format)
|
google/google-visualization-python
|
gviz_api.py
|
DataTable.AppendData
|
python
|
def AppendData(self, data, custom_properties=None):
# If the maximal depth is 0, we simply iterate over the data table
# lines and insert them using _InnerAppendData. Otherwise, we simply
# let the _InnerAppendData handle all the levels.
if not self.__columns[-1]["depth"]:
for row in data:
self._InnerAppendData(({}, custom_properties), row, 0)
else:
self._InnerAppendData(({}, custom_properties), data, 0)
|
Appends new data to the table.
Data is appended in rows. Data must comply with
the table schema passed in to __init__(). See CoerceValue() for a list
of acceptable data types. See the class documentation for more information
and examples of schema and data values.
Args:
data: The row to add to the table. The data must conform to the table
description format.
custom_properties: A dictionary of string to string, representing the
custom properties to add to all the rows.
Raises:
DataTableException: The data structure does not match the description.
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L567-L591
|
[
"def _InnerAppendData(self, prev_col_values, data, col_index):\n \"\"\"Inner function to assist LoadData.\"\"\"\n # We first check that col_index has not exceeded the columns size\n if col_index >= len(self.__columns):\n raise DataTableException(\"The data does not match description, too deep\")\n\n # Dealing with the scalar case, the data is the last value.\n if self.__columns[col_index][\"container\"] == \"scalar\":\n prev_col_values[0][self.__columns[col_index][\"id\"]] = data\n self.__data.append(prev_col_values)\n return\n\n if self.__columns[col_index][\"container\"] == \"iter\":\n if not hasattr(data, \"__iter__\") or isinstance(data, dict):\n raise DataTableException(\"Expected iterable object, got %s\" %\n type(data))\n # We only need to insert the rest of the columns\n # If there are less items than expected, we only add what there is.\n for value in data:\n if col_index >= len(self.__columns):\n raise DataTableException(\"Too many elements given in data\")\n prev_col_values[0][self.__columns[col_index][\"id\"]] = value\n col_index += 1\n self.__data.append(prev_col_values)\n return\n\n # We know the current level is a dictionary, we verify the type.\n if not isinstance(data, dict):\n raise DataTableException(\"Expected dictionary at current level, got %s\" %\n type(data))\n # We check if this is the last level\n if self.__columns[col_index][\"depth\"] == self.__columns[-1][\"depth\"]:\n # We need to add the keys in the dictionary as they are\n for col in self.__columns[col_index:]:\n if col[\"id\"] in data:\n prev_col_values[0][col[\"id\"]] = data[col[\"id\"]]\n self.__data.append(prev_col_values)\n return\n\n # We have a dictionary in an inner depth level.\n if not data.keys():\n # In case this is an empty dictionary, we add a record with the columns\n # filled only until this point.\n self.__data.append(prev_col_values)\n else:\n for key in sorted(data):\n col_values = dict(prev_col_values[0])\n col_values[self.__columns[col_index][\"id\"]] = key\n 
self._InnerAppendData((col_values, prev_col_values[1]),\n data[key], col_index + 1)\n"
] |
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
  """Initializes the table with a schema and, optionally, initial rows.

  Args:
    table_description: A table schema in any of the formats accepted by
        TableDescriptionParser(); it defines the column ids, types and
        labels.
    data: Optional initial rows, structured to match the schema (see the
        class documentation). Rows can also be added later with
        AppendData().
    custom_properties: Optional string-to-string dictionary stored as the
        table-level custom properties; it can be replaced later by
        assigning to self.custom_properties.

  Raises:
    DataTableException: The description (or the data) did not match the
        supported formats.
  """
  self.__columns = self.TableDescriptionParser(table_description)
  self.__data = []
  if custom_properties is None:
    self.custom_properties = {}
  else:
    self.custom_properties = custom_properties
  if data:
    self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
  """Coerces a single value into the type expected for its column.

  Internal helper method.

  Args:
    value: The value which should be converted
    value_type: One of "string", "number", "boolean", "date", "datetime" or
        "timeofday".

  Returns:
    An item of the Python type appropriate to the given value_type. Strings
    are also converted to Unicode using UTF-8 encoding if necessary.

    If a tuple is given, it should be in one of the following forms:
      - (value, formatted value)
      - (value, formatted value, custom properties)
    where the formatted value is a string, and custom properties is a
    dictionary of the custom properties for this cell.
    To specify custom properties without specifying formatted value, one can
    pass None as the formatted value.
    One can also have a null-valued cell with formatted value and/or custom
    properties by specifying None for the value.
    This method ignores the custom properties except for checking that it is
    a dictionary. The custom properties are handled in the ToJSon and
    ToJSCode methods.

    The real type of the given value is not strictly checked. For example,
    any type can be used for string - as we simply take its str( ) and for
    boolean value we just check "if value".

  Examples:
    CoerceValue(None, "string") returns None
    CoerceValue((5, "5$"), "number") returns (5, "5$")
    CoerceValue(100, "string") returns "100"
    CoerceValue(0, "boolean") returns False

  Raises:
    DataTableException: The value and type did not match in a
        not-recoverable way, for example given value 'abc' for type
        'number'.
  """
  if isinstance(value, tuple):
    # In case of a tuple, we run the same function on the value itself and
    # add the formatted value.
    if (len(value) not in [2, 3] or
        (len(value) == 3 and not isinstance(value[2], dict))):
      raise DataTableException("Wrong format for value and formatting - %s." %
                               str(value))
    if not isinstance(value[1], six.string_types + (type(None),)):
      raise DataTableException("Formatted value is not string, given %s." %
                               type(value[1]))
    js_value = DataTable.CoerceValue(value[0], value_type)
    # Re-attach the formatted value (and custom properties, if present) to
    # the coerced raw value.
    return (js_value,) + value[1:]

  # Remember the original Python type for the error messages below.
  t_value = type(value)
  if value is None:
    # None is a valid "null" cell for every column type.
    return value
  if value_type == "boolean":
    return bool(value)

  elif value_type == "number":
    if isinstance(value, six.integer_types + (float,)):
      return value
    raise DataTableException("Wrong type %s when expected number" % t_value)

  elif value_type == "string":
    if isinstance(value, six.text_type):
      return value
    if isinstance(value, bytes):
      return six.text_type(value, encoding="utf-8")
    else:
      return six.text_type(value)

  elif value_type == "date":
    # Note: datetime is a subclass of date, so the datetime check must come
    # first (it strips the time-of-day part).
    if isinstance(value, datetime.datetime):
      return datetime.date(value.year, value.month, value.day)
    elif isinstance(value, datetime.date):
      return value
    else:
      raise DataTableException("Wrong type %s when expected date" % t_value)

  elif value_type == "timeofday":
    # A full datetime is accepted too; only its time components are kept.
    if isinstance(value, datetime.datetime):
      return datetime.time(value.hour, value.minute, value.second)
    elif isinstance(value, datetime.time):
      return value
    else:
      raise DataTableException("Wrong type %s when expected time" % t_value)

  elif value_type == "datetime":
    if isinstance(value, datetime.datetime):
      return value
    else:
      raise DataTableException("Wrong type %s when expected datetime" %
                               t_value)
  # If we got here, it means the given value_type was not one of the
  # supported types.
  raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
  """Returns the plain-text rendering of a single cell value.

  None renders as "(empty)", date/time values use their str() form,
  booleans are lowercased ("true"/"false"), bytes are decoded as UTF-8,
  and anything else is converted to text.
  """
  if value is None:
    return "(empty)"
  if isinstance(value, (datetime.datetime, datetime.date, datetime.time)):
    return str(value)
  if isinstance(value, six.text_type):
    return value
  if isinstance(value, bool):
    return str(value).lower()
  if isinstance(value, bytes):
    return six.text_type(value, encoding="utf-8")
  return six.text_type(value)
@staticmethod
def ColumnTypeParser(description):
  """Parses one column description into its canonical dictionary form.

  Internal helper method.

  Accepted shapes for `description`:
    'id'
    ('id',)
    ('id', 'type')
    ('id', 'type', 'label')
    ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})

  Returns:
    A dictionary with the keys "id", "label", "type" and
    "custom_properties". Missing pieces default to: label == id,
    type == "string", and an empty custom-properties dictionary.

  Raises:
    DataTableException: The description is malformed or names an
        unsupported column type.
  """
  if not description:
    raise DataTableException("Description error: empty description given")

  if not isinstance(description, (six.string_types, tuple)):
    raise DataTableException("Description error: expected either string or "
                             "tuple, got %s." % type(description))

  if isinstance(description, six.string_types):
    description = (description,)

  # Everything before the custom-properties slot must be a string.
  for part in description[:3]:
    if not isinstance(part, six.string_types):
      raise DataTableException("Description error: expected tuple of "
                               "strings, current element of type %s." %
                               type(part))

  parsed = {"id": description[0],
            "label": description[0],
            "type": "string",
            "custom_properties": {}}
  if len(description) > 1:
    parsed["type"] = description[1].lower()
  if len(description) > 2:
    parsed["label"] = description[2]
  if len(description) > 3:
    if not isinstance(description[3], dict):
      raise DataTableException("Description error: expected custom "
                               "properties of type dict, current element "
                               "of type %s." % type(description[3]))
    parsed["custom_properties"] = description[3]
  if len(description) > 4:
    raise DataTableException("Description error: tuple of length > 4")

  if parsed["type"] not in ("string", "number", "boolean",
                            "date", "datetime", "timeofday"):
    raise DataTableException(
        "Description error: unsupported type '%s'" % parsed["type"])
  return parsed
@staticmethod
def TableDescriptionParser(table_description, depth=0):
  """Parses the table_description object for internal use.

  Parses the user-submitted table description into an internal format used
  by the Python DataTable class. Returns the flat list of parsed columns.

  Args:
    table_description: A description of the table which should comply
        with one of the formats described below.
    depth: Optional. The depth of the first level in the current
        description. Used by recursive calls to this function.

  Returns:
    List of columns, where each column represented by a dictionary with the
    keys: id, label, type, depth, container which means the following:
    - id: the id of the column
    - name: The name of the column
    - type: The datatype of the elements in this column. Allowed types are
      described in ColumnTypeParser().
    - depth: The depth of this column in the table description
    - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
    - custom_properties: The custom properties for this column.
    The returned description is flattened regardless of how it was given.

  Raises:
    DataTableException: Error in a column description or in the description
        structure.

  Examples:
    A column description can be of the following forms:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
     or as a dictionary:
     'id': 'type'
     'id': ('type',)
     'id': ('type', 'label')
     'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
    If the type is not specified, we treat it as string.
    If no specific label is given, the label is simply the id.
    If no custom properties are given, we use an empty dictionary.

    input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
    output: [{'id': 'a', 'label': 'a', 'type': 'date',
              'depth': 0, 'container': 'iter', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'timeofday',
              'depth': 0, 'container': 'iter',
              'custom_properties': {'foo': 'bar'}}]

    input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
    output: [{'id': 'a', 'label': 'a', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'iter', 'custom_properties': {}},
             {'id': 'c', 'label': 'column c', 'type': 'string',
              'depth': 1, 'container': 'iter', 'custom_properties': {}}]

    input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'c', 'type': 'string',
              'depth': 1, 'container': 'dict', 'custom_properties': {}}]

    input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
    output: [{'id': 'w', 'label': 'word', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'count', 'type': 'number',
              'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

    input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
              'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
              'container': 'dict', 'custom_properties': {}}

    NOTE: there might be ambiguity in the case of a dictionary representation
    of a single column. For example, the following description can be parsed
    in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
    with the id 'a', of type 'b' and the label 'c', or as 2 columns: one
    named 'a', and the other named 'b' of type 'c'. We choose the first
    option by default, and in case the second option is the right one, it is
    possible to make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add
    more info into the tuple, thus making it look like this:
    {'a': ('b', 'c', 'b', {})} -- second 'b' is the label, and {} is the
    custom properties field.
  """
  # For the recursion step, we check for a scalar object (string or tuple)
  if isinstance(table_description, (six.string_types, tuple)):
    parsed_col = DataTable.ColumnTypeParser(table_description)
    parsed_col["depth"] = depth
    parsed_col["container"] = "scalar"
    return [parsed_col]

  # Since it is not scalar, table_description must be iterable.
  if not hasattr(table_description, "__iter__"):
    raise DataTableException("Expected an iterable object, got %s" %
                             type(table_description))
  if not isinstance(table_description, dict):
    # We expects a non-dictionary iterable item.
    columns = []
    for desc in table_description:
      parsed_col = DataTable.ColumnTypeParser(desc)
      parsed_col["depth"] = depth
      parsed_col["container"] = "iter"
      columns.append(parsed_col)
    if not columns:
      raise DataTableException("Description iterable objects should not"
                               " be empty.")
    return columns

  # The other case is a dictionary
  if not table_description:
    raise DataTableException("Empty dictionaries are not allowed inside"
                             " description")

  # To differentiate between the two cases of more levels below or this is
  # the most inner dictionary, we consider the number of keys (more then one
  # key is indication for most inner dictionary) and the type of the key and
  # value in case of only 1 key (if the type of key is string and the type of
  # the value is a tuple of 0-3 items, we assume this is the most inner
  # dictionary).
  # NOTE: this way of differentiating might create ambiguity. See docs.
  if (len(table_description) != 1 or
      (isinstance(next(six.iterkeys(table_description)), six.string_types) and
       isinstance(next(six.itervalues(table_description)), tuple) and
       len(next(six.itervalues(table_description))) < 4)):
    # This is the most inner dictionary. Parsing types.
    columns = []
    # We sort the items, equivalent to sort the keys since they are unique
    for key, value in sorted(table_description.items()):
      # We parse the column type as (key, type) or (key, type, label) using
      # ColumnTypeParser.
      if isinstance(value, tuple):
        parsed_col = DataTable.ColumnTypeParser((key,) + value)
      else:
        parsed_col = DataTable.ColumnTypeParser((key, value))
      parsed_col["depth"] = depth
      parsed_col["container"] = "dict"
      columns.append(parsed_col)
    return columns

  # This is an outer dictionary, must have at most one key.
  parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
  parsed_col["depth"] = depth
  parsed_col["container"] = "dict"
  # Recurse into the single value to flatten the deeper column levels.
  return ([parsed_col] + DataTable.TableDescriptionParser(
      sorted(table_description.values())[0], depth=depth + 1))
@property
def columns(self):
  """Returns the parsed table description.

  Each element is a dictionary with the keys "id", "label", "type",
  "depth", "container" and "custom_properties", as produced by
  TableDescriptionParser().
  """
  return self.__columns
def NumberOfRows(self):
  """Returns the number of rows in the current data stored in the table."""
  return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
  """Attaches the given custom properties to one or more existing rows.

  Args:
    rows: A single row index, or an iterable of row indexes.
    custom_properties: A string-to-string dictionary set, as is, on every
        row listed in `rows` (replacing any previous row properties).
  """
  row_indexes = rows if hasattr(rows, "__iter__") else [rows]
  for index in row_indexes:
    # Each stored row is a (values dictionary, custom properties) pair;
    # keep the values, swap in the new properties.
    row_values = self.__data[index][0]
    self.__data[index] = (row_values, custom_properties)
def LoadData(self, data, custom_properties=None):
  """Loads new rows to the data table, clearing existing rows.

  May also set the custom_properties for the added rows. The given custom
  properties dictionary specifies the dictionary that will be used for
  *all* given rows.

  Args:
    data: The rows that the table will contain.
    custom_properties: A dictionary of string to string to set as the
        custom properties for all rows.
  """
  # Drop any previously loaded rows, then reuse the append machinery.
  self.__data = []
  self.AppendData(data, custom_properties)
def _InnerAppendData(self, prev_col_values, data, col_index):
  """Inner function to assist LoadData.

  Recursively walks `data` in parallel with the parsed column
  descriptions, accumulating cell values and appending completed rows to
  self.__data.

  Args:
    prev_col_values: A (row dictionary, custom properties) pair holding
        the values collected so far for the columns before col_index.
    data: The remaining (possibly nested) user data for this row.
    col_index: Index into self.__columns of the next column to fill.

  Raises:
    DataTableException: The data does not match the table description.
  """
  # We first check that col_index has not exceeded the columns size
  if col_index >= len(self.__columns):
    raise DataTableException("The data does not match description, too deep")

  # Dealing with the scalar case, the data is the last value.
  if self.__columns[col_index]["container"] == "scalar":
    prev_col_values[0][self.__columns[col_index]["id"]] = data
    self.__data.append(prev_col_values)
    return

  if self.__columns[col_index]["container"] == "iter":
    if not hasattr(data, "__iter__") or isinstance(data, dict):
      raise DataTableException("Expected iterable object, got %s" %
                               type(data))
    # We only need to insert the rest of the columns
    # If there are less items than expected, we only add what there is.
    for value in data:
      if col_index >= len(self.__columns):
        raise DataTableException("Too many elements given in data")
      prev_col_values[0][self.__columns[col_index]["id"]] = value
      col_index += 1
    self.__data.append(prev_col_values)
    return

  # We know the current level is a dictionary, we verify the type.
  if not isinstance(data, dict):
    raise DataTableException("Expected dictionary at current level, got %s" %
                             type(data))
  # We check if this is the last level
  if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
    # We need to add the keys in the dictionary as they are
    for col in self.__columns[col_index:]:
      if col["id"] in data:
        prev_col_values[0][col["id"]] = data[col["id"]]
    self.__data.append(prev_col_values)
    return

  # We have a dictionary in an inner depth level.
  if not data.keys():
    # In case this is an empty dictionary, we add a record with the columns
    # filled only until this point.
    self.__data.append(prev_col_values)
  else:
    # Each key becomes this column's value; recurse to fill the deeper
    # columns, giving every key its own copy of the row built so far.
    for key in sorted(data):
      col_values = dict(prev_col_values[0])
      col_values[self.__columns[col_index]["id"]] = key
      self._InnerAppendData((col_values, prev_col_values[1]),
                            data[key], col_index + 1)
def _PreparedData(self, order_by=()):
  """Prepares the data for enumeration - sorting it by order_by.

  Args:
    order_by: Optional. Specifies the name of the column(s) to sort by, and
        (optionally) which direction to sort in. Default sort direction
        is asc. Following formats are accepted:
        "string_col_name"  -- For a single key in default (asc) order.
        ("string_col_name", "asc|desc") -- For a single key.
        [("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
            one column, an array of tuples of (col_name, "asc|desc").

  Returns:
    The data sorted by the keys given.

  Raises:
    DataTableException: Sort direction not in 'asc' or 'desc'
  """
  if not order_by:
    return self.__data

  sorted_data = self.__data[:]
  # Normalize a single key -- either a bare column name or one
  # (name, direction) pair -- into a one-element tuple of keys.
  if isinstance(order_by, six.string_types) or (
      isinstance(order_by, tuple) and len(order_by) == 2 and
      order_by[1].lower() in ["asc", "desc"]):
    order_by = (order_by,)
  # Keys are applied from last to first; because list.sort() is stable,
  # the first entry in order_by ends up being the primary sort key.
  for key in reversed(order_by):
    if isinstance(key, six.string_types):
      sorted_data.sort(key=lambda x: x[0].get(key))
    elif (isinstance(key, (list, tuple)) and len(key) == 2 and
          key[1].lower() in ("asc", "desc")):
      # The lambda closes over the loop variable, but the sort runs
      # immediately within this iteration, so no late-binding issue.
      key_func = lambda x: x[0].get(key[0])
      sorted_data.sort(key=key_func, reverse=key[1].lower() != "asc")
    else:
      raise DataTableException("Expected tuple with second value: "
                               "'asc' or 'desc'")
  return sorted_data
def ToJSCode(self, name, columns_order=None, order_by=()):
  """Writes the data table as a JS code string.

  This method writes a string of JS code that can be run to
  generate a DataTable with the specified data. Typically used for
  debugging only.

  Args:
    name: The name of the table. The name would be used as the DataTable's
        variable name in the created JS code.
    columns_order: Optional. Specifies the order of columns in the
        output table. Specify a list of all column IDs in the order
        in which you want the table created.
        Note that you must list all column IDs in this parameter,
        if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
        Passed as is to _PreparedData.

  Returns:
    A string of JS code that, when run, generates a DataTable with the
    given name and the data stored in the DataTable object.
    Example result:
      "var tab1 = new google.visualization.DataTable();
       tab1.addColumn("string", "a", "a");
       tab1.addColumn("number", "b", "b");
       tab1.addColumn("boolean", "c", "c");
       tab1.addRows(10);
       tab1.setCell(0, 0, "a");
       tab1.setCell(0, 1, 1, null, {"foo": "bar"});
       tab1.setCell(0, 2, true);
       ...
       tab1.setCell(9, 0, "c");
       tab1.setCell(9, 1, 3, "3$");
       tab1.setCell(9, 2, false);"

  Raises:
    DataTableException: The data does not match the type.
  """
  encoder = DataTableJSONEncoder()

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # We first create the table with the given name
  jscode = "var %s = new google.visualization.DataTable();\n" % name
  if self.custom_properties:
    jscode += "%s.setTableProperties(%s);\n" % (
        name, encoder.encode(self.custom_properties))

  # We add the columns to the table
  for i, col in enumerate(columns_order):
    jscode += "%s.addColumn(%s, %s, %s);\n" % (
        name,
        encoder.encode(col_dict[col]["type"]),
        encoder.encode(col_dict[col]["label"]),
        encoder.encode(col_dict[col]["id"]))
    if col_dict[col]["custom_properties"]:
      jscode += "%s.setColumnProperties(%d, %s);\n" % (
          name, i, encoder.encode(col_dict[col]["custom_properties"]))
  jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

  # We now go over the data and add each row
  for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
    # We add all the elements of this row by their order
    for (j, col) in enumerate(columns_order):
      if col not in row or row[col] is None:
        # Missing / null cells are simply left unset in the JS table.
        continue
      value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        cell_cp = ""
        if len(value) == 3:
          cell_cp = ", %s" % encoder.encode(row[col][2])
        # We have a formatted value or custom property as well
        jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                   (name, i, j,
                    self.EscapeForJSCode(encoder, value[0]),
                    self.EscapeForJSCode(encoder, value[1]), cell_cp))
      else:
        jscode += "%s.setCell(%d, %d, %s);\n" % (
            name, i, j, self.EscapeForJSCode(encoder, value))
    if cp:
      jscode += "%s.setRowProperties(%d, %s);\n" % (
          name, i, encoder.encode(cp))
  return jscode
def ToHtml(self, columns_order=None, order_by=()):
  """Renders the data table as a minimal standalone HTML document.

  Args:
    columns_order: Optional. A list of all column IDs in the order in
        which the columns should appear; if given, every column ID must
        be listed.
    order_by: Optional. Column name(s) to sort the rows by; passed as is
        to _PreparedData.

  Returns:
    A string of the form
    '<html><body><table border="1">...</table></body></html>' with one
    <th> per column label and one <tr> of <td> cells per row. Cell text
    is HTML-escaped; a formatted value is preferred over the raw value
    when one was supplied.

  Raises:
    DataTableException: The data does not match the type.
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict((col["id"], col) for col in self.__columns)

  header_cells = "".join(
      "<th>%s</th>" % html.escape(col_dict[col]["label"])
      for col in columns_order)

  body_rows = []
  for row, _ in self._PreparedData(order_by):
    cells = []
    for col in columns_order:
      # An absent or None cell renders as an empty <td></td>.
      cell_value = ""
      if col in row and row[col] is not None:
        cell_value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(cell_value, tuple):
        # Prefer the formatted value when one was supplied.
        cell_value = cell_value[1]
      cells.append("<td>%s</td>" % html.escape(self.ToString(cell_value)))
    body_rows.append("<tr>%s</tr>" % "".join(cells))

  return ("<html><body><table border=\"1\">"
          "<thead><tr>%s</tr></thead>"
          "<tbody>%s</tbody>"
          "</table></body></html>" % (header_cells, "".join(body_rows)))
def ToCsv(self, columns_order=None, order_by=(), separator=","):
  """Writes the data table as a CSV string.

  Output is encoded in UTF-8 because the Python "csv" module can't handle
  Unicode properly according to its documentation.

  Args:
    columns_order: Optional. Specifies the order of columns in the
        output table. Specify a list of all column IDs in the order
        in which you want the table created.
        Note that you must list all column IDs in this parameter,
        if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
        Passed as is to _PreparedData.
    separator: Optional. The separator to use between the values.

  Returns:
    A CSV string representing the table.
    Example result:
     'a','b','c'
     1,'z',2
     3,'w',''

  Raises:
    DataTableException: The data does not match the type.
  """
  csv_buffer = six.StringIO()
  writer = csv.writer(csv_buffer, delimiter=separator)

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  def ensure_str(s):
    "Compatibility function. Ensures using of str rather than unicode."
    # On Python 3 everything is already str; on Python 2 unicode values
    # are encoded to UTF-8 byte strings for the csv module.
    if isinstance(s, str):
      return s
    return s.encode("utf-8")

  writer.writerow([ensure_str(col_dict[col]["label"])
                   for col in columns_order])

  # We now go over the data and add each row
  for row, unused_cp in self._PreparedData(order_by):
    cells_list = []
    # We add all the elements of this row by their order
    for col in columns_order:
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # We have a formatted value. Using it only for date/time types.
        if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
          cells_list.append(ensure_str(self.ToString(value[1])))
        else:
          cells_list.append(ensure_str(self.ToString(value[0])))
      else:
        cells_list.append(ensure_str(self.ToString(value)))
    writer.writerow(cells_list)
  return csv_buffer.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
  """Returns the table as UTF-16LE tab-separated values for MS Excel.

  Args:
    columns_order: Delegated to ToCsv.
    order_by: Delegated to ToCsv.

  Returns:
    The ToCsv() output with tab separators, encoded as little-endian
    UTF-16 bytes (the encoding Excel expects when importing TSV files).
  """
  tsv = self.ToCsv(columns_order, order_by, separator="\t")
  if not isinstance(tsv, six.text_type):
    # Python 2 ToCsv may hand back UTF-8 bytes; normalize to text first.
    tsv = tsv.decode("utf-8")
  return tsv.encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
  """Returns an object suitable to be converted to JSON.

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
        you want them created in the output table. If specified,
        all column IDs must be present.
    order_by: Optional. Specifies the name of the column(s) to sort by.
        Passed as is to _PreparedData().

  Returns:
    A dictionary object for use by ToJSon or ToJSonResponse.
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Creating the column JSON objects
  col_objs = []
  for col_id in columns_order:
    col_obj = {"id": col_dict[col_id]["id"],
               "label": col_dict[col_id]["label"],
               "type": col_dict[col_id]["type"]}
    if col_dict[col_id]["custom_properties"]:
      col_obj["p"] = col_dict[col_id]["custom_properties"]
    col_objs.append(col_obj)

  # Creating the rows jsons
  row_objs = []
  for row, cp in self._PreparedData(order_by):
    cell_objs = []
    for col in columns_order:
      value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
      if value is None:
        cell_obj = None
      elif isinstance(value, tuple):
        # "v" is the raw value, "f" the formatted value and "p" the
        # cell-level custom properties, per the DataTable JSON format.
        cell_obj = {"v": value[0]}
        if len(value) > 1 and value[1] is not None:
          cell_obj["f"] = value[1]
        if len(value) == 3:
          cell_obj["p"] = value[2]
      else:
        cell_obj = {"v": value}
      cell_objs.append(cell_obj)
    row_obj = {"c": cell_objs}
    if cp:
      row_obj["p"] = cp
    row_objs.append(row_obj)

  json_obj = {"cols": col_objs, "rows": row_objs}
  if self.custom_properties:
    json_obj["p"] = self.custom_properties

  return json_obj
def ToJSon(self, columns_order=None, order_by=()):
  """Returns a JSON string usable by the JS DataTable constructor.

  The output can be passed directly into a
  google.visualization.DataTable constructor on a page that hosts the
  visualization, e.g.:

    google.setOnLoadCallback(drawTable);
    function drawTable() {
      var data = new google.visualization.DataTable(_my_JSon_string, 0.6);
      myTable.draw(data);
    }

  Args:
    columns_order: Optional. A list of all column IDs in the order in
        which the columns should appear in the output; if given, every
        column ID must be listed.
    order_by: Optional. Column name(s) to sort the rows by; passed as is
        to _PreparedData().

  Returns:
    A JSON string describing the columns, rows and table-level custom
    properties of this DataTable, e.g. (without the newlines):
      {cols: [{id:"a",label:"a",type:"number"}, ...],
       rows: [{c:[{v:1},{v:"z"}]}, ...], p: {'foo': 'bar'}}

  Raises:
    DataTableException: The data does not match the type.
  """
  json_str = DataTableJSONEncoder().encode(
      self._ToJSonObj(columns_order, order_by))
  if isinstance(json_str, str):
    return json_str
  # Python 2 may produce unicode here; callers expect a byte string.
  return json_str.encode("utf-8")
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
  """Returns a complete JSON response for a Google Visualization query.

  Wraps the table's JSON representation in a call to the client-side
  response handler, so the returned string can be served as-is to a
  visualization hosted on a different page and processed by the JS
  Query object there.

  Args:
    columns_order: Optional. Passed straight to _ToJSonObj().
    order_by: Optional. Passed straight to _ToJSonObj().
    req_id: Optional. The request id, echoed back as "reqId".
    response_handler: Optional. The JS function name wrapped around the
        response, as retrieved from the request.

  Returns:
    A string of the form (newlines added for readability):
      google.visualization.Query.setResponse({
          'version':'0.6', 'reqId':'0', 'status':'ok',
          'table': {cols: [...], rows: [...]}});
    Note: The URL returning this string can be used as a data source by
    Google Visualization Gadgets or from JS code.
  """
  payload = {
      "version": "0.6",
      "reqId": str(req_id),
      "table": self._ToJSonObj(columns_order, order_by),
      "status": "ok"
  }
  encoded = DataTableJSONEncoder().encode(payload)
  if not isinstance(encoded, str):
    encoded = encoded.encode("utf-8")
  return "%s(%s);" % (response_handler, encoded)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
  """Dispatches to the right output format based on the tqx request string.

  Parses the tqx string (format defined by the Google Visualization data
  source protocol), validates the protocol version, and routes on the
  "out" parameter: "json" -> ToJSonResponse(), "html" -> ToHtml(),
  "csv" -> ToCsv(), "tsv-excel" -> ToTsvExcel(). Remaining tqx keys that
  matter ("reqId", "responseHandler") are forwarded to ToJSonResponse().

  Args:
    columns_order: Optional. Passed as is to the chosen response method.
    order_by: Optional. Passed as is to the chosen response method.
    tqx: Optional. The request string as received by HTTP GET, in the form
        "key1:value1;key2:value2...". Every key has a default, so an empty
        string yields a plain ToJSonResponse() call.

  Returns:
    A response string, as returned by the chosen response method.

  Raises:
    DataTableException: One of the parameters passed in tqx is not
        supported.
  """
  options = {}
  if tqx:
    options = dict(pair.split(":") for pair in tqx.split(";"))
  if options.get("version", "0.6") != "0.6":
    raise DataTableException(
        "Version (%s) passed by request is not supported."
        % options["version"])
  out = options.get("out", "json")
  if out == "json":
    handler = options.get("responseHandler",
                          "google.visualization.Query.setResponse")
    return self.ToJSonResponse(columns_order, order_by,
                               req_id=options.get("reqId", 0),
                               response_handler=handler)
  if out == "html":
    return self.ToHtml(columns_order, order_by)
  if out == "csv":
    return self.ToCsv(columns_order, order_by)
  if out == "tsv-excel":
    return self.ToTsvExcel(columns_order, order_by)
  raise DataTableException(
      "'out' parameter: '%s' is not supported" % options["out"])
|
google/google-visualization-python
|
gviz_api.py
|
DataTable._InnerAppendData
|
python
|
def _InnerAppendData(self, prev_col_values, data, col_index):
  """Recursively consumes `data` and appends row(s) to the table.

  Helper for LoadData/AppendData. `prev_col_values` is a
  (row_dict, custom_properties) pair accumulating cell values for the
  columns consumed so far; `col_index` points at the next column (in
  self.__columns) that `data` should populate. The column's "container"
  field ("scalar", "iter" or "dict") decides how `data` is interpreted.

  Raises:
    DataTableException: The data does not match the table description.
  """
  # We first check that col_index has not exceeded the columns size
  if col_index >= len(self.__columns):
    raise DataTableException("The data does not match description, too deep")

  # Dealing with the scalar case, the data is the last value.
  if self.__columns[col_index]["container"] == "scalar":
    prev_col_values[0][self.__columns[col_index]["id"]] = data
    self.__data.append(prev_col_values)
    return

  if self.__columns[col_index]["container"] == "iter":
    # dict is excluded even though it is iterable: iterating it would
    # yield keys, which is never what the caller means here.
    if not hasattr(data, "__iter__") or isinstance(data, dict):
      raise DataTableException("Expected iterable object, got %s" %
                               type(data))
    # We only need to insert the rest of the columns
    # If there are less items than expected, we only add what there is.
    for value in data:
      if col_index >= len(self.__columns):
        raise DataTableException("Too many elements given in data")
      prev_col_values[0][self.__columns[col_index]["id"]] = value
      col_index += 1
    self.__data.append(prev_col_values)
    return

  # We know the current level is a dictionary, we verify the type.
  if not isinstance(data, dict):
    raise DataTableException("Expected dictionary at current level, got %s" %
                             type(data))
  # We check if this is the last level
  if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
    # We need to add the keys in the dictionary as they are
    for col in self.__columns[col_index:]:
      if col["id"] in data:
        prev_col_values[0][col["id"]] = data[col["id"]]
    self.__data.append(prev_col_values)
    return

  # We have a dictionary in an inner depth level.
  if not data.keys():
    # In case this is an empty dictionary, we add a record with the columns
    # filled only until this point.
    self.__data.append(prev_col_values)
  else:
    # Sorted iteration keeps the emitted row order deterministic.
    for key in sorted(data):
      # Copy the accumulated values so sibling keys don't share a row dict.
      col_values = dict(prev_col_values[0])
      col_values[self.__columns[col_index]["id"]] = key
      self._InnerAppendData((col_values, prev_col_values[1]),
                            data[key], col_index + 1)
|
Inner function to assist LoadData.
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L593-L642
|
[
"def _InnerAppendData(self, prev_col_values, data, col_index):\n \"\"\"Inner function to assist LoadData.\"\"\"\n # We first check that col_index has not exceeded the columns size\n if col_index >= len(self.__columns):\n raise DataTableException(\"The data does not match description, too deep\")\n\n # Dealing with the scalar case, the data is the last value.\n if self.__columns[col_index][\"container\"] == \"scalar\":\n prev_col_values[0][self.__columns[col_index][\"id\"]] = data\n self.__data.append(prev_col_values)\n return\n\n if self.__columns[col_index][\"container\"] == \"iter\":\n if not hasattr(data, \"__iter__\") or isinstance(data, dict):\n raise DataTableException(\"Expected iterable object, got %s\" %\n type(data))\n # We only need to insert the rest of the columns\n # If there are less items than expected, we only add what there is.\n for value in data:\n if col_index >= len(self.__columns):\n raise DataTableException(\"Too many elements given in data\")\n prev_col_values[0][self.__columns[col_index][\"id\"]] = value\n col_index += 1\n self.__data.append(prev_col_values)\n return\n\n # We know the current level is a dictionary, we verify the type.\n if not isinstance(data, dict):\n raise DataTableException(\"Expected dictionary at current level, got %s\" %\n type(data))\n # We check if this is the last level\n if self.__columns[col_index][\"depth\"] == self.__columns[-1][\"depth\"]:\n # We need to add the keys in the dictionary as they are\n for col in self.__columns[col_index:]:\n if col[\"id\"] in data:\n prev_col_values[0][col[\"id\"]] = data[col[\"id\"]]\n self.__data.append(prev_col_values)\n return\n\n # We have a dictionary in an inner depth level.\n if not data.keys():\n # In case this is an empty dictionary, we add a record with the columns\n # filled only until this point.\n self.__data.append(prev_col_values)\n else:\n for key in sorted(data):\n col_values = dict(prev_col_values[0])\n col_values[self.__columns[col_index][\"id\"]] = key\n 
self._InnerAppendData((col_values, prev_col_values[1]),\n data[key], col_index + 1)\n"
] |
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
  """Builds the table from a schema and, optionally, initial data.

  See the class documentation for the accepted schema and data layouts.

  Args:
    table_description: A table schema in one of the formats accepted by
        TableDescriptionParser(): column names, data types, and labels.
    data: Optional. Rows to load immediately; must be consistent with
        the schema. More rows can be added later via AppendData().
    custom_properties: Optional. A string-to-string dictionary stored as
        the table's custom properties; may be replaced later by assigning
        to self.custom_properties.

  Raises:
    DataTableException: The data did not match the description, or the
        description used an unsupported format.
  """
  self.__columns = self.TableDescriptionParser(table_description)
  self.__data = []
  if custom_properties is None:
    self.custom_properties = {}
  else:
    self.custom_properties = custom_properties
  if data:
    self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
  """Coerces a single value into the type expected for its column.

  Internal helper method.

  Args:
    value: The value which should be converted
    value_type: One of "string", "number", "boolean", "date", "datetime" or
                "timeofday".

  Returns:
    An item of the Python type appropriate to the given value_type. Strings
    are also converted to Unicode using UTF-8 encoding if necessary.
    If a tuple is given, it should be in one of the following forms:
      - (value, formatted value)
      - (value, formatted value, custom properties)
    where the formatted value is a string, and custom properties is a
    dictionary of the custom properties for this cell.
    To specify custom properties without specifying formatted value, one can
    pass None as the formatted value.
    One can also have a null-valued cell with formatted value and/or custom
    properties by specifying None for the value.
    This method ignores the custom properties except for checking that it is a
    dictionary. The custom properties are handled in the ToJSon and ToJSCode
    methods.
    The real type of the given value is not strictly checked. For example,
    any type can be used for string - as we simply take its str( ) and for
    boolean value we just check "if value".

  Examples:
    CoerceValue(None, "string") returns None
    CoerceValue((5, "5$"), "number") returns (5, "5$")
    CoerceValue(100, "string") returns "100"
    CoerceValue(0, "boolean") returns False

  Raises:
    DataTableException: The value and type did not match in a not-recoverable
                        way, for example given value 'abc' for type 'number'.
  """
  if isinstance(value, tuple):
    # In case of a tuple, we run the same function on the value itself and
    # add the formatted value.
    if (len(value) not in [2, 3] or
        (len(value) == 3 and not isinstance(value[2], dict))):
      raise DataTableException("Wrong format for value and formatting - %s." %
                               str(value))
    if not isinstance(value[1], six.string_types + (type(None),)):
      raise DataTableException("Formatted value is not string, given %s." %
                               type(value[1]))
    # Only the raw value is coerced; formatted value / custom properties
    # pass through untouched.
    js_value = DataTable.CoerceValue(value[0], value_type)
    return (js_value,) + value[1:]

  # Captured before the None check so error messages can report the
  # offending type below.
  t_value = type(value)
  if value is None:
    # None is a valid (null) cell for every column type.
    return value
  if value_type == "boolean":
    return bool(value)

  elif value_type == "number":
    if isinstance(value, six.integer_types + (float,)):
      return value
    raise DataTableException("Wrong type %s when expected number" % t_value)

  elif value_type == "string":
    if isinstance(value, six.text_type):
      return value
    if isinstance(value, bytes):
      return six.text_type(value, encoding="utf-8")
    else:
      # Anything else is stringified rather than rejected (see docstring).
      return six.text_type(value)

  elif value_type == "date":
    # datetime is checked first because it is a subclass of date:
    # a datetime given for a date column is truncated to its date part.
    if isinstance(value, datetime.datetime):
      return datetime.date(value.year, value.month, value.day)
    elif isinstance(value, datetime.date):
      return value
    else:
      raise DataTableException("Wrong type %s when expected date" % t_value)

  elif value_type == "timeofday":
    # A full datetime is reduced to its time-of-day component.
    if isinstance(value, datetime.datetime):
      return datetime.time(value.hour, value.minute, value.second)
    elif isinstance(value, datetime.time):
      return value
    else:
      raise DataTableException("Wrong type %s when expected time" % t_value)

  elif value_type == "datetime":
    if isinstance(value, datetime.datetime):
      return value
    else:
      raise DataTableException("Wrong type %s when expected datetime" %
                               t_value)
  # If we got here, it means the given value_type was not one of the
  # supported types.
  raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
if value is None:
return "(empty)"
elif isinstance(value, (datetime.datetime,
datetime.date,
datetime.time)):
return str(value)
elif isinstance(value, six.text_type):
return value
elif isinstance(value, bool):
return str(value).lower()
elif isinstance(value, bytes):
return six.text_type(value, encoding="utf-8")
else:
return six.text_type(value)
@staticmethod
def ColumnTypeParser(description):
  """Parses one column description into its canonical dictionary form.

  Accepted inputs:
    'id'
    ('id',)
    ('id', 'type')
    ('id', 'type', 'label')
    ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})

  Returns:
    A dictionary with keys id, label, type and custom_properties, where
    the label defaults to the id, the type defaults to "string", and the
    custom properties default to an empty dictionary.

  Raises:
    DataTableException: The description is malformed or names an
        unsupported type.
  """
  if not description:
    raise DataTableException("Description error: empty description given")

  if not isinstance(description, (six.string_types, tuple)):
    raise DataTableException("Description error: expected either string or "
                             "tuple, got %s." % type(description))

  # Normalize a bare id string to a one-element tuple.
  if isinstance(description, six.string_types):
    description = (description,)

  # The first three positions (id, type, label) must all be strings.
  for part in description[:3]:
    if not isinstance(part, six.string_types):
      raise DataTableException("Description error: expected tuple of "
                               "strings, current element of type %s." %
                               type(part))

  parsed = {"id": description[0],
            "label": description[0],
            "type": "string",
            "custom_properties": {}}
  size = len(description)
  if size > 1:
    parsed["type"] = description[1].lower()
  if size > 2:
    parsed["label"] = description[2]
  if size > 3:
    if not isinstance(description[3], dict):
      raise DataTableException("Description error: expected custom "
                               "properties of type dict, current element "
                               "of type %s." % type(description[3]))
    parsed["custom_properties"] = description[3]
  if size > 4:
    raise DataTableException("Description error: tuple of length > 4")
  if parsed["type"] not in ("string", "number", "boolean",
                            "date", "datetime", "timeofday"):
    raise DataTableException(
        "Description error: unsupported type '%s'" % parsed["type"])
  return parsed
@staticmethod
def TableDescriptionParser(table_description, depth=0):
  """Parses the table_description object for internal use.

  Parses the user-submitted table description into an internal format used
  by the Python DataTable class. Returns the flat list of parsed columns.

  Args:
    table_description: A description of the table which should comply
                       with one of the formats described below.
    depth: Optional. The depth of the first level in the current description.
           Used by recursive calls to this function.

  Returns:
    List of columns, where each column represented by a dictionary with the
    keys: id, label, type, depth, container which means the following:
    - id: the id of the column
    - name: The name of the column
    - type: The datatype of the elements in this column. Allowed types are
            described in ColumnTypeParser().
    - depth: The depth of this column in the table description
    - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
    - custom_properties: The custom properties for this column.
    The returned description is flattened regardless of how it was given.

  Raises:
    DataTableException: Error in a column description or in the description
                        structure.

  Examples:
    A column description can be of the following forms:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
    or as a dictionary:
     'id': 'type'
     'id': ('type',)
     'id': ('type', 'label')
     'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
    If the type is not specified, we treat it as string.
    If no specific label is given, the label is simply the id.
    If no custom properties are given, we use an empty dictionary.

    input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
    output: [{'id': 'a', 'label': 'a', 'type': 'date',
              'depth': 0, 'container': 'iter', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'timeofday',
              'depth': 0, 'container': 'iter',
              'custom_properties': {'foo': 'bar'}}]

    input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
    output: [{'id': 'a', 'label': 'a', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'iter', 'custom_properties': {}},
             {'id': 'c', 'label': 'column c', 'type': 'string',
              'depth': 1, 'container': 'iter', 'custom_properties': {}}]

    input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'c', 'type': 'string',
              'depth': 1, 'container': 'dict', 'custom_properties': {}}]

    input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
    output: [{'id': 'w', 'label': 'word', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'count', 'type': 'number',
              'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

    input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
             'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
             'container': 'dict', 'custom_properties': {}}

    NOTE: there might be ambiguity in the case of a dictionary representation
    of a single column. For example, the following description can be parsed
    in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
    with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
    'a', and the other named 'b' of type 'c'. We choose the first option by
    default, and in case the second option is the right one, it is possible to
    make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
    into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
    -- second 'b' is the label, and {} is the custom properties field.
  """
  # For the recursion step, we check for a scalar object (string or tuple)
  if isinstance(table_description, (six.string_types, tuple)):
    parsed_col = DataTable.ColumnTypeParser(table_description)
    parsed_col["depth"] = depth
    parsed_col["container"] = "scalar"
    return [parsed_col]

  # Since it is not scalar, table_description must be iterable.
  if not hasattr(table_description, "__iter__"):
    raise DataTableException("Expected an iterable object, got %s" %
                             type(table_description))
  if not isinstance(table_description, dict):
    # We expects a non-dictionary iterable item.
    # Every element describes one sibling column at the current depth.
    columns = []
    for desc in table_description:
      parsed_col = DataTable.ColumnTypeParser(desc)
      parsed_col["depth"] = depth
      parsed_col["container"] = "iter"
      columns.append(parsed_col)
    if not columns:
      raise DataTableException("Description iterable objects should not"
                               " be empty.")
    return columns
  # The other case is a dictionary
  if not table_description:
    raise DataTableException("Empty dictionaries are not allowed inside"
                             " description")

  # To differentiate between the two cases of more levels below or this is
  # the most inner dictionary, we consider the number of keys (more then one
  # key is indication for most inner dictionary) and the type of the key and
  # value in case of only 1 key (if the type of key is string and the type of
  # the value is a tuple of 0-3 items, we assume this is the most inner
  # dictionary).
  # NOTE: this way of differentiating might create ambiguity. See docs.
  if (len(table_description) != 1 or
      (isinstance(next(six.iterkeys(table_description)), six.string_types) and
       isinstance(next(six.itervalues(table_description)), tuple) and
       len(next(six.itervalues(table_description))) < 4)):
    # This is the most inner dictionary. Parsing types.
    columns = []
    # We sort the items, equivalent to sort the keys since they are unique
    for key, value in sorted(table_description.items()):
      # We parse the column type as (key, type) or (key, type, label) using
      # ColumnTypeParser.
      if isinstance(value, tuple):
        parsed_col = DataTable.ColumnTypeParser((key,) + value)
      else:
        parsed_col = DataTable.ColumnTypeParser((key, value))
      parsed_col["depth"] = depth
      parsed_col["container"] = "dict"
      columns.append(parsed_col)
    return columns
  # This is an outer dictionary, must have at most one key.
  # Its single key is a column at this depth; its value is recursed into
  # at depth + 1 and the results are concatenated into one flat list.
  parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
  parsed_col["depth"] = depth
  parsed_col["container"] = "dict"
  return ([parsed_col] + DataTable.TableDescriptionParser(
      sorted(table_description.values())[0], depth=depth + 1))
@property
def columns(self):
"""Returns the parsed table description."""
return self.__columns
def NumberOfRows(self):
"""Returns the number of rows in the current data stored in the table."""
return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
"""Sets the custom properties for given row(s).
Can accept a single row or an iterable of rows.
Sets the given custom properties for all specified rows.
Args:
rows: The row, or rows, to set the custom properties for.
custom_properties: A string to string dictionary of custom properties to
set for all rows.
"""
if not hasattr(rows, "__iter__"):
rows = [rows]
for row in rows:
self.__data[row] = (self.__data[row][0], custom_properties)
def LoadData(self, data, custom_properties=None):
  """Replaces the table contents with the given rows.

  Any rows previously stored are discarded, then `data` is appended. The
  optional custom_properties dictionary is applied to *all* added rows.

  Args:
    data: The rows that the table will contain.
    custom_properties: A string-to-string dictionary set as the custom
        properties for every added row.
  """
  self.__data = []
  self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
  """Appends new rows to the table.

  `data` must conform to the schema passed to __init__() (see the class
  documentation and CoerceValue() for acceptable values). The optional
  custom_properties dictionary is applied to all appended rows.

  Args:
    data: The row(s) to add, matching the table description format.
    custom_properties: A string-to-string dictionary of custom properties
        to attach to every added row.

  Raises:
    DataTableException: The data structure does not match the description.
  """
  # A flat schema (maximal depth 0) means each element of `data` is one
  # complete row; a nested schema hands the whole structure to
  # _InnerAppendData, which walks the levels itself.
  if self.__columns[-1]["depth"]:
    self._InnerAppendData(({}, custom_properties), data, 0)
  else:
    for row in data:
      self._InnerAppendData(({}, custom_properties), row, 0)
def _PreparedData(self, order_by=()):
"""Prepares the data for enumeration - sorting it by order_by.
Args:
order_by: Optional. Specifies the name of the column(s) to sort by, and
(optionally) which direction to sort in. Default sort direction
is asc. Following formats are accepted:
"string_col_name" -- For a single key in default (asc) order.
("string_col_name", "asc|desc") -- For a single key.
[("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
one column, an array of tuples of (col_name, "asc|desc").
Returns:
The data sorted by the keys given.
Raises:
DataTableException: Sort direction not in 'asc' or 'desc'
"""
if not order_by:
return self.__data
sorted_data = self.__data[:]
if isinstance(order_by, six.string_types) or (
isinstance(order_by, tuple) and len(order_by) == 2 and
order_by[1].lower() in ["asc", "desc"]):
order_by = (order_by,)
for key in reversed(order_by):
if isinstance(key, six.string_types):
sorted_data.sort(key=lambda x: x[0].get(key))
elif (isinstance(key, (list, tuple)) and len(key) == 2 and
key[1].lower() in ("asc", "desc")):
key_func = lambda x: x[0].get(key[0])
sorted_data.sort(key=key_func, reverse=key[1].lower() != "asc")
else:
raise DataTableException("Expected tuple with second value: "
"'asc' or 'desc'")
return sorted_data
def ToJSCode(self, name, columns_order=None, order_by=()):
  """Writes the data table as a JS code string.

  This method writes a string of JS code that can be run to
  generate a DataTable with the specified data. Typically used for debugging
  only.

  Args:
    name: The name of the table. The name would be used as the DataTable's
          variable name in the created JS code.
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the order
                   in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.

  Returns:
    A string of JS code that, when run, generates a DataTable with the given
    name and the data stored in the DataTable object.

  Example result:
    "var tab1 = new google.visualization.DataTable();
     tab1.addColumn("string", "a", "a");
     tab1.addColumn("number", "b", "b");
     tab1.addColumn("boolean", "c", "c");
     tab1.addRows(10);
     tab1.setCell(0, 0, "a");
     tab1.setCell(0, 1, 1, null, {"foo": "bar"});
     tab1.setCell(0, 2, true);
     ...
     tab1.setCell(9, 0, "c");
     tab1.setCell(9, 1, 3, "3$");
     tab1.setCell(9, 2, false);"

  Raises:
    DataTableException: The data does not match the type.
  """
  encoder = DataTableJSONEncoder()

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # We first create the table with the given name
  jscode = "var %s = new google.visualization.DataTable();\n" % name
  if self.custom_properties:
    jscode += "%s.setTableProperties(%s);\n" % (
        name, encoder.encode(self.custom_properties))

  # We add the columns to the table
  for i, col in enumerate(columns_order):
    jscode += "%s.addColumn(%s, %s, %s);\n" % (
        name,
        encoder.encode(col_dict[col]["type"]),
        encoder.encode(col_dict[col]["label"]),
        encoder.encode(col_dict[col]["id"]))
    if col_dict[col]["custom_properties"]:
      jscode += "%s.setColumnProperties(%d, %s);\n" % (
          name, i, encoder.encode(col_dict[col]["custom_properties"]))
  jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

  # We now go over the data and add each row
  for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
    # We add all the elements of this row by their order
    for (j, col) in enumerate(columns_order):
      # Missing / None cells are simply never set in the generated JS.
      if col not in row or row[col] is None:
        continue
      value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        cell_cp = ""
        if len(value) == 3:
          cell_cp = ", %s" % encoder.encode(row[col][2])
        # We have a formatted value or custom property as well
        # (a None formatted value renders as JS null via EscapeForJSCode).
        jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                   (name, i, j,
                    self.EscapeForJSCode(encoder, value[0]),
                    self.EscapeForJSCode(encoder, value[1]), cell_cp))
      else:
        jscode += "%s.setCell(%d, %d, %s);\n" % (
            name, i, j, self.EscapeForJSCode(encoder, value))
    if cp:
      jscode += "%s.setRowProperties(%d, %s);\n" % (
          name, i, encoder.encode(cp))
  return jscode
def ToHtml(self, columns_order=None, order_by=()):
  """Writes the data table as an HTML table code string.

  Args:
    columns_order: Optional. Specifies the order of columns in the output
        table as a list of all column IDs; when given, it must list every
        column ID.
    order_by: Optional. Column(s) to sort by; passed as is to
        _PreparedData.

  Returns:
    An HTML table code string, e.g. (newlines added for readability):
      <html><body><table border="1">
      <thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
      <tbody>
      <tr><td>1</td><td>"z"</td><td>2</td></tr>
      <tr><td>"3$"</td><td>"w"</td><td></td></tr>
      </tbody>
      </table></body></html>

  Raises:
    DataTableException: The data does not match the type.
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Header row: one escaped <th> per column label.
  header_cells = ["<th>%s</th>" % html.escape(col_dict[col]["label"])
                  for col in columns_order]
  head_html = "<thead><tr>%s</tr></thead>" % "".join(header_cells)

  body_rows = []
  for row, unused_cp in self._PreparedData(order_by):
    cells = []
    for col in columns_order:
      # For empty string we want empty quotes ("").
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # A (value, formatted) pair: display the formatted text.
        shown = value[1]
      else:
        shown = value
      cells.append("<td>%s</td>" % html.escape(self.ToString(shown)))
    body_rows.append("<tr>%s</tr>" % "".join(cells))
  body_html = "<tbody>%s</tbody>" % "".join(body_rows)

  return ("<html><body><table border=\"1\">%s%s</table></body></html>"
          % (head_html, body_html))
def ToCsv(self, columns_order=None, order_by=(), separator=","):
  """Writes the data table as a CSV string.

  Output is encoded in UTF-8 because the Python "csv" module can't handle
  Unicode properly according to its documentation.

  Args:
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the
                   order in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.
    separator: Optional. The separator to use between the values.

  Returns:
    A CSV string representing the table.
    Example result:
     'a','b','c'
     1,'z',2
     3,'w',''

  Raises:
    DataTableException: The data does not match the type.
  """
  csv_buffer = six.StringIO()
  writer = csv.writer(csv_buffer, delimiter=separator)
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  def ensure_str(s):
    "Compatibility function. Ensures using of str rather than unicode."
    if isinstance(s, str):
      return s
    # Python 2 unicode text: encode back to the native str type for csv.
    return s.encode("utf-8")

  # Header row: the column labels, in the requested order.
  writer.writerow([ensure_str(col_dict[col]["label"])
                   for col in columns_order])
  # We now go over the data and add each row
  for row, unused_cp in self._PreparedData(order_by):
    cells_list = []
    # We add all the elements of this row by their order
    for col in columns_order:
      # Missing or None cells render as the empty string.
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # We have a formatted value. Using it only for date/time types.
        if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
          cells_list.append(ensure_str(self.ToString(value[1])))
        else:
          cells_list.append(ensure_str(self.ToString(value[0])))
      else:
        cells_list.append(ensure_str(self.ToString(value)))
    writer.writerow(cells_list)
  return csv_buffer.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
  """Returns a file in tab-separated format readable by MS Excel.

  The content is the same as ToCsv() produces with a tab separator, but
  transcoded to little-endian UTF-16, which is what Excel expects.

  Args:
    columns_order: Delegated to ToCsv.
    order_by: Delegated to ToCsv.

  Returns:
    A tab-separated, little-endian UTF-16 encoded byte string.
  """
  tsv = self.ToCsv(columns_order, order_by, separator="\t")
  # Normalize to text first: on Python 2, ToCsv returns UTF-8 bytes.
  if isinstance(tsv, six.text_type):
    text = tsv
  else:
    text = tsv.decode("utf-8")
  return text.encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
  """Returns an object suitable to be converted to JSON.

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
                   you want them created in the output table. If specified,
                   all column IDs must be present.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData().

  Returns:
    A dictionary object for use by ToJSon or ToJSonResponse, in the
    Google Visualization DataTable JSON shape: {"cols": [...],
    "rows": [...]} plus an optional "p" for table custom properties.
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Creating the column JSON objects
  col_objs = []
  for col_id in columns_order:
    col_obj = {"id": col_dict[col_id]["id"],
               "label": col_dict[col_id]["label"],
               "type": col_dict[col_id]["type"]}
    if col_dict[col_id]["custom_properties"]:
      # "p" carries the column's custom properties in the gviz JSON format.
      col_obj["p"] = col_dict[col_id]["custom_properties"]
    col_objs.append(col_obj)

  # Creating the rows jsons
  row_objs = []
  for row, cp in self._PreparedData(order_by):
    cell_objs = []
    for col in columns_order:
      value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
      if value is None:
        # Null cells are emitted as JSON null.
        cell_obj = None
      elif isinstance(value, tuple):
        # (value[, formatted value[, custom properties]]) tuple:
        # "v" is the raw value, "f" the formatted value, "p" the properties.
        cell_obj = {"v": value[0]}
        if len(value) > 1 and value[1] is not None:
          cell_obj["f"] = value[1]
        if len(value) == 3:
          cell_obj["p"] = value[2]
      else:
        cell_obj = {"v": value}
      cell_objs.append(cell_obj)
    row_obj = {"c": cell_objs}
    if cp:
      # Row-level custom properties.
      row_obj["p"] = cp
    row_objs.append(row_obj)

  json_obj = {"cols": col_objs, "rows": row_objs}
  if self.custom_properties:
    # Table-level custom properties.
    json_obj["p"] = self.custom_properties
  return json_obj
def ToJSon(self, columns_order=None, order_by=()):
  """Returns a string that can be used in a JS DataTable constructor.

  The returned JSON string can be passed directly into a
  google.visualization.DataTable constructor on a page that hosts the
  visualization, e.g.:

    var data = new google.visualization.DataTable(<returned string>, 0.6);

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
                   the columns should appear in the output. If given, every
                   column ID must be listed.
    order_by: Optional. Name of the column(s) to sort by; passed as is to
              _PreparedData().

  Returns:
    A JSON constructor string describing the table, e.g. (newlines removed):
      {cols: [{id:"a",label:"a",type:"number"},
              {id:"b",label:"b",type:"string"}],
       rows: [{c:[{v:1},{v:"z"}]}, {c:[{v:3,f:"3$"},{v:"w"}]}],
       p: {'foo': 'bar'}}

  Raises:
    DataTableException: The data does not match the type.
  """
  json_obj = self._ToJSonObj(columns_order, order_by)
  encoded = DataTableJSONEncoder().encode(json_obj)
  # On Python 2 the encoder yields unicode; callers expect a byte string.
  if isinstance(encoded, str):
    return encoded
  return encoded.encode("utf-8")
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
  """Writes a table as a JSON response that can be returned as-is to a client.

  Builds the response a Google Visualization Query object expects: the
  serialized table wrapped in a call to the client-supplied response
  handler. The URL producing this string can be used directly as a data
  source by Google Visualization widgets or JS code.

  Args:
    columns_order: Optional. Passed straight to self._ToJSonObj().
    order_by: Optional. Passed straight to self._ToJSonObj().
    req_id: Optional. The response id, as retrieved by the request.
    response_handler: Optional. The response handler, as retrieved by the
                      request.

  Returns:
    A JSON response string, e.g. (newlines added for readability):
      google.visualization.Query.setResponse({
          'version':'0.6', 'reqId':'0', 'status':'ok',
          'table': {cols: [...], rows: [...]}});
  """
  response_obj = {
      "version": "0.6",
      "reqId": str(req_id),
      "table": self._ToJSonObj(columns_order, order_by),
      "status": "ok"
  }
  encoded = DataTableJSONEncoder().encode(response_obj)
  # On Python 2 the encoder yields unicode; normalize to the native str.
  if not isinstance(encoded, str):
    encoded = encoded.encode("utf-8")
  return "%s(%s);" % (response_handler, encoded)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
  """Writes the right response according to the request string passed in tqx.

  Parses the tqx request string (format defined by the Google Visualization
  data source protocol), inspects its "out" parameter, and delegates to the
  matching serializer: ToJSonResponse() for "json" (the default), ToHtml()
  for "html", ToCsv() for "csv", and ToTsvExcel() for "tsv-excel", passing
  along the remaining relevant request keys.

  Args:
    columns_order: Optional. Passed as is to the relevant response function.
    order_by: Optional. Passed as is to the relevant response function.
    tqx: Optional. The request string as received by HTTP GET, in the
         format "key1:value1;key2:value2...". Every key has a default, so
         an empty string yields the default JSON response.

  Returns:
    A response string, as returned by the relevant response function.

  Raises:
    DataTableException: One of the parameters passed in tqx is not supported.
  """
  options = {}
  if tqx:
    options = dict(pair.split(":") for pair in tqx.split(";"))

  # Only protocol version 0.6 is implemented.
  if options.get("version", "0.6") != "0.6":
    raise DataTableException(
        "Version (%s) passed by request is not supported."
        % options["version"])

  out_format = options.get("out", "json")
  if out_format == "json":
    handler = options.get("responseHandler",
                          "google.visualization.Query.setResponse")
    return self.ToJSonResponse(columns_order, order_by,
                               req_id=options.get("reqId", 0),
                               response_handler=handler)
  if out_format == "html":
    return self.ToHtml(columns_order, order_by)
  if out_format == "csv":
    return self.ToCsv(columns_order, order_by)
  if out_format == "tsv-excel":
    return self.ToTsvExcel(columns_order, order_by)
  raise DataTableException(
      "'out' parameter: '%s' is not supported" % out_format)
|
google/google-visualization-python
|
gviz_api.py
|
DataTable._PreparedData
|
python
|
def _PreparedData(self, order_by=()):
  """Prepares the data for enumeration - sorting it by order_by.

  Args:
    order_by: Optional. Specifies the name of the column(s) to sort by, and
              (optionally) which direction to sort in. Default sort direction
              is asc. Following formats are accepted:
              "string_col_name" -- For a single key in default (asc) order.
              ("string_col_name", "asc|desc") -- For a single key.
              [("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
              one column, an array of tuples of (col_name, "asc|desc").

  Returns:
    The data sorted by the keys given.

  Raises:
    DataTableException: Sort direction not in 'asc' or 'desc'
  """
  if not order_by:
    return self.__data

  # Sort a copy so the stored data keeps its insertion order.
  sorted_data = self.__data[:]
  if isinstance(order_by, six.string_types) or (
      isinstance(order_by, tuple) and len(order_by) == 2 and
      order_by[1].lower() in ["asc", "desc"]):
    # A single key (a bare column name or one (name, direction) pair) is
    # wrapped so the multi-key loop below handles both forms uniformly.
    order_by = (order_by,)
  # Sorting by the keys in reverse order relies on the sort being stable,
  # which makes the first listed key the dominant ordering.
  for key in reversed(order_by):
    if isinstance(key, six.string_types):
      sorted_data.sort(key=lambda x: x[0].get(key))
    elif (isinstance(key, (list, tuple)) and len(key) == 2 and
          key[1].lower() in ("asc", "desc")):
      key_func = lambda x: x[0].get(key[0])
      sorted_data.sort(key=key_func, reverse=key[1].lower() != "asc")
    else:
      raise DataTableException("Expected tuple with second value: "
                               "'asc' or 'desc'")
  return sorted_data
|
Prepares the data for enumeration - sorting it by order_by.
Args:
order_by: Optional. Specifies the name of the column(s) to sort by, and
(optionally) which direction to sort in. Default sort direction
is asc. Following formats are accepted:
"string_col_name" -- For a single key in default (asc) order.
("string_col_name", "asc|desc") -- For a single key.
[("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
one column, an array of tuples of (col_name, "asc|desc").
Returns:
The data sorted by the keys given.
Raises:
DataTableException: Sort direction not in 'asc' or 'desc'
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L644-L681
| null |
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
  """Initializes the table from a schema and, optionally, initial data.

  See the class documentation for more information on table schema and
  data values.

  Args:
    table_description: The table schema, in any of the formats accepted by
                       TableDescriptionParser(). Describes the column
                       names, data types, labels and custom properties.
    data: Optional. Initial rows for the table; the structure must be
          consistent with the schema. More rows can be added later with
          AppendData().
    custom_properties: Optional. A string-to-string dictionary stored as
                       the table's custom properties; can be changed later
                       through self.custom_properties.

  Raises:
    DataTableException: The data and the description did not match, or did
                        not use the supported formats.
  """
  self.__columns = self.TableDescriptionParser(table_description)
  self.__data = []
  if custom_properties is None:
    self.custom_properties = {}
  else:
    self.custom_properties = custom_properties
  if data:
    self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
  """Coerces a single value into the type expected for its column.

  Internal helper method.

  Args:
    value: The value which should be converted
    value_type: One of "string", "number", "boolean", "date", "datetime" or
                "timeofday".

  Returns:
    An item of the Python type appropriate to the given value_type. Strings
    are also converted to Unicode using UTF-8 encoding if necessary.
    If a tuple is given, it should be in one of the following forms:
      - (value, formatted value)
      - (value, formatted value, custom properties)
    where the formatted value is a string, and custom properties is a
    dictionary of the custom properties for this cell.
    To specify custom properties without specifying formatted value, one can
    pass None as the formatted value.
    One can also have a null-valued cell with formatted value and/or custom
    properties by specifying None for the value.
    This method ignores the custom properties except for checking that it is
    a dictionary. The custom properties are handled in the ToJSon and
    ToJSCode methods.
    The real type of the given value is not strictly checked. For example,
    any type can be used for string - as we simply take its str( ) and for
    boolean value we just check "if value".

  Examples:
    CoerceValue(None, "string") returns None
    CoerceValue((5, "5$"), "number") returns (5, "5$")
    CoerceValue(100, "string") returns "100"
    CoerceValue(0, "boolean") returns False

  Raises:
    DataTableException: The value and type did not match in a not-recoverable
                        way, for example given value 'abc' for type 'number'.
  """
  if isinstance(value, tuple):
    # In case of a tuple, we run the same function on the value itself and
    # add the formatted value.
    if (len(value) not in [2, 3] or
        (len(value) == 3 and not isinstance(value[2], dict))):
      raise DataTableException("Wrong format for value and formatting - %s." %
                               str(value))
    if not isinstance(value[1], six.string_types + (type(None),)):
      raise DataTableException("Formatted value is not string, given %s." %
                               type(value[1]))
    js_value = DataTable.CoerceValue(value[0], value_type)
    # Re-attach the formatted value (and optional custom properties).
    return (js_value,) + value[1:]

  t_value = type(value)
  if value is None:
    # None is a valid (null) cell for every column type.
    return value
  if value_type == "boolean":
    return bool(value)

  elif value_type == "number":
    if isinstance(value, six.integer_types + (float,)):
      return value
    raise DataTableException("Wrong type %s when expected number" % t_value)

  elif value_type == "string":
    if isinstance(value, six.text_type):
      return value
    if isinstance(value, bytes):
      # Byte strings are assumed to hold UTF-8 encoded text.
      return six.text_type(value, encoding="utf-8")
    else:
      return six.text_type(value)

  elif value_type == "date":
    if isinstance(value, datetime.datetime):
      # Truncate a datetime down to its date component.
      return datetime.date(value.year, value.month, value.day)
    elif isinstance(value, datetime.date):
      return value
    else:
      raise DataTableException("Wrong type %s when expected date" % t_value)

  elif value_type == "timeofday":
    if isinstance(value, datetime.datetime):
      # Truncate a datetime down to its time-of-day component.
      return datetime.time(value.hour, value.minute, value.second)
    elif isinstance(value, datetime.time):
      return value
    else:
      raise DataTableException("Wrong type %s when expected time" % t_value)

  elif value_type == "datetime":
    if isinstance(value, datetime.datetime):
      return value
    else:
      raise DataTableException("Wrong type %s when expected datetime" %
                               t_value)
  # If we got here, it means the given value_type was not one of the
  # supported types.
  raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
  """Returns a JS literal for a value, for embedding in generated JS code.

  Dates and datetimes become "new Date(...)" constructor calls (with the
  month decremented by one, since JavaScript Date months are zero-based);
  None becomes "null"; every other value is delegated to the given JSON
  encoder.

  Args:
    encoder: A JSON encoder object (e.g. DataTableJSONEncoder) used for
             non-date, non-None values.
    value: The value to convert to a JS literal.

  Returns:
    A string of JS code that evaluates to the given value.
  """
  if value is None:
    return "null"
  elif isinstance(value, datetime.datetime):
    if value.microsecond == 0:
      # If it's not ms-resolution, leave that out to save space.
      return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
                                              value.month - 1,  # To match JS
                                              value.day,
                                              value.hour,
                                              value.minute,
                                              value.second)
    else:
      return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
                                                 value.month - 1,  # match JS
                                                 value.day,
                                                 value.hour,
                                                 value.minute,
                                                 value.second,
                                                 value.microsecond / 1000)
  elif isinstance(value, datetime.date):
    return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
  else:
    return encoder.encode(value)
@staticmethod
def ToString(value):
  """Returns the display-string form of a single cell value.

  None renders as "(empty)", date/time values via str(), booleans as
  lowercase "true"/"false" style strings, byte strings are decoded as
  UTF-8, and anything else goes through the text constructor.
  """
  if value is None:
    return "(empty)"
  # Date/time values must be checked before the generic fallbacks.
  if isinstance(value, (datetime.datetime,
                        datetime.date,
                        datetime.time)):
    return str(value)
  if isinstance(value, six.text_type):
    return value
  # bool must be handled before the generic text conversion so that the
  # output is lowercased.
  if isinstance(value, bool):
    return str(value).lower()
  if isinstance(value, bytes):
    return six.text_type(value, encoding="utf-8")
  return six.text_type(value)
@staticmethod
def ColumnTypeParser(description):
  """Parses a single column description. Internal helper method.

  Args:
    description: a column description in the possible formats:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})

  Returns:
    Dictionary with the following keys: id, label, type, and
    custom_properties where:
      - If label not given, it equals the id.
      - If type not given, string is used by default.
      - If custom properties are not given, an empty dictionary is used by
        default.

  Raises:
    DataTableException: The column description did not match the RE, or
        unsupported type was passed.
  """
  if not description:
    raise DataTableException("Description error: empty description given")

  if not isinstance(description, (six.string_types, tuple)):
    raise DataTableException("Description error: expected either string or "
                             "tuple, got %s." % type(description))

  if isinstance(description, six.string_types):
    # A bare string is shorthand for a 1-tuple (id only).
    description = (description,)

  # According to the tuple's length, we fill the keys
  # We verify everything is of type string
  for elem in description[:3]:
    if not isinstance(elem, six.string_types):
      raise DataTableException("Description error: expected tuple of "
                               "strings, current element of type %s." %
                               type(elem))
  # Defaults: label mirrors the id, type is "string", no custom properties.
  desc_dict = {"id": description[0],
               "label": description[0],
               "type": "string",
               "custom_properties": {}}
  if len(description) > 1:
    desc_dict["type"] = description[1].lower()
    if len(description) > 2:
      desc_dict["label"] = description[2]
      if len(description) > 3:
        if not isinstance(description[3], dict):
          raise DataTableException("Description error: expected custom "
                                   "properties of type dict, current element "
                                   "of type %s." % type(description[3]))
        desc_dict["custom_properties"] = description[3]
        if len(description) > 4:
          raise DataTableException("Description error: tuple of length > 4")
  if desc_dict["type"] not in ["string", "number", "boolean",
                               "date", "datetime", "timeofday"]:
    raise DataTableException(
        "Description error: unsupported type '%s'" % desc_dict["type"])
  return desc_dict
@staticmethod
def TableDescriptionParser(table_description, depth=0):
  """Parses the table_description object for internal use.

  Parses the user-submitted table description into an internal format used
  by the Python DataTable class. Returns the flat list of parsed columns.

  Args:
    table_description: A description of the table which should comply
                       with one of the formats described below.
    depth: Optional. The depth of the first level in the current description.
           Used by recursive calls to this function.

  Returns:
    List of columns, where each column is represented by a dictionary with
    the keys: id, label, type, depth, container, custom_properties, where:
      - id: the id of the column
      - label: the display name of the column
      - type: the datatype of the elements in this column. Allowed types
        are described in ColumnTypeParser().
      - depth: the depth of this column in the table description
      - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
      - custom_properties: the custom properties for this column.
    The returned description is flattened regardless of how it was given.

  Raises:
    DataTableException: Error in a column description or in the description
                        structure.

  Examples:
    A column description can be of the following forms:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
    or as a dictionary:
     'id': 'type'
     'id': ('type',)
     'id': ('type', 'label')
     'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
    If the type is not specified, we treat it as string.
    If no specific label is given, the label is simply the id.
    If no custom properties are given, we use an empty dictionary.

    input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
    output: [{'id': 'a', 'label': 'a', 'type': 'date',
              'depth': 0, 'container': 'iter', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'timeofday',
              'depth': 0, 'container': 'iter',
              'custom_properties': {'foo': 'bar'}}]

    input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
    output: [{'id': 'a', 'label': 'a', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'iter', 'custom_properties': {}},
             {'id': 'c', 'label': 'column c', 'type': 'string',
              'depth': 1, 'container': 'iter', 'custom_properties': {}}]

    input: {('a', 'number', 'column a'): {'b': 'number', 'c': 'string'}}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'c', 'type': 'string',
              'depth': 1, 'container': 'dict', 'custom_properties': {}}]

    input: {('w', 'string', 'word'): ('c', 'number', 'count')}
    output: [{'id': 'w', 'label': 'word', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'count', 'type': 'number',
              'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

    input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
              'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
              'container': 'dict', 'custom_properties': {}}]

    NOTE: there might be ambiguity in the case of a dictionary representation
    of a single column. For example, the following description can be parsed
    in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
    with the id 'a', of type 'b' and the label 'c', or as 2 columns: one
    named 'a', and the other named 'b' of type 'c'. We choose the first
    option by default, and in case the second option is the right one, it is
    possible to make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add
    more info into the tuple, thus making it look like this:
    {'a': ('b', 'c', 'b', {})} -- second 'b' is the label, and {} is the
    custom properties field.
  """
  # For the recursion step, we check for a scalar object (string or tuple)
  if isinstance(table_description, (six.string_types, tuple)):
    parsed_col = DataTable.ColumnTypeParser(table_description)
    parsed_col["depth"] = depth
    parsed_col["container"] = "scalar"
    return [parsed_col]

  # Since it is not scalar, table_description must be iterable.
  if not hasattr(table_description, "__iter__"):
    raise DataTableException("Expected an iterable object, got %s" %
                             type(table_description))
  if not isinstance(table_description, dict):
    # We expects a non-dictionary iterable item.
    columns = []
    for desc in table_description:
      parsed_col = DataTable.ColumnTypeParser(desc)
      parsed_col["depth"] = depth
      parsed_col["container"] = "iter"
      columns.append(parsed_col)
    if not columns:
      raise DataTableException("Description iterable objects should not"
                               " be empty.")
    return columns
  # The other case is a dictionary
  if not table_description:
    raise DataTableException("Empty dictionaries are not allowed inside"
                             " description")

  # To differentiate between the two cases of more levels below or this is
  # the most inner dictionary, we consider the number of keys (more then one
  # key is indication for most inner dictionary) and the type of the key and
  # value in case of only 1 key (if the type of key is string and the type of
  # the value is a tuple of 0-3 items, we assume this is the most inner
  # dictionary).
  # NOTE: this way of differentiating might create ambiguity. See docs.
  if (len(table_description) != 1 or
      (isinstance(next(six.iterkeys(table_description)), six.string_types) and
       isinstance(next(six.itervalues(table_description)), tuple) and
       len(next(six.itervalues(table_description))) < 4)):
    # This is the most inner dictionary. Parsing types.
    columns = []
    # We sort the items, equivalent to sort the keys since they are unique
    for key, value in sorted(table_description.items()):
      # We parse the column type as (key, type) or (key, type, label) using
      # ColumnTypeParser.
      if isinstance(value, tuple):
        parsed_col = DataTable.ColumnTypeParser((key,) + value)
      else:
        parsed_col = DataTable.ColumnTypeParser((key, value))
      parsed_col["depth"] = depth
      parsed_col["container"] = "dict"
      columns.append(parsed_col)
    return columns
  # This is an outer dictionary, must have at most one key.
  # The single key describes this level's column; recurse into its value
  # for the deeper columns.
  parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
  parsed_col["depth"] = depth
  parsed_col["container"] = "dict"
  return ([parsed_col] + DataTable.TableDescriptionParser(
      sorted(table_description.values())[0], depth=depth + 1))
@property
def columns(self):
  """Returns the parsed table description (list of column dictionaries)."""
  return self.__columns
def NumberOfRows(self):
  """Returns the number of rows of data currently stored in the table."""
  return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
  """Sets the custom properties for the given row index (or indexes).

  Accepts either a single row index or an iterable of indexes, and assigns
  the same custom-properties dictionary to each of those rows.

  Args:
    rows: The row index, or an iterable of row indexes, to update.
    custom_properties: A string-to-string dictionary of custom properties
                       to attach to each specified row.
  """
  # Normalize a single index into a one-element list.
  row_indexes = rows if hasattr(rows, "__iter__") else [rows]
  for index in row_indexes:
    # Rows are stored as (values_dict, custom_properties) tuples; keep the
    # values and replace the properties.
    existing_values = self.__data[index][0]
    self.__data[index] = (existing_values, custom_properties)
def LoadData(self, data, custom_properties=None):
  """Replaces all rows in the table with the given data.

  Clears any existing rows, then appends the new ones. The given
  custom-properties dictionary, if any, is applied to *all* added rows.

  Args:
    data: The rows that the table will contain.
    custom_properties: Optional. A string-to-string dictionary set as the
                       custom properties of every added row.
  """
  # Drop existing rows, then reuse AppendData for validation and insertion.
  self.__data = []
  self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
  """Appends new rows to the table.

  The data must comply with the table schema passed to __init__(); see
  CoerceValue() for acceptable cell types and the class documentation for
  examples of schema and data values.

  Args:
    data: The rows to add to the table, in the table description format.
    custom_properties: Optional. A string-to-string dictionary of custom
                       properties applied to all the added rows.

  Raises:
    DataTableException: The data structure does not match the description.
  """
  max_depth = self.__columns[-1]["depth"]
  if max_depth:
    # Nested description: _InnerAppendData walks every level itself.
    self._InnerAppendData(({}, custom_properties), data, 0)
  else:
    # Flat description: each top-level element is one row.
    for row in data:
      self._InnerAppendData(({}, custom_properties), row, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
  """Inner function to assist LoadData.

  Recursively walks the (possibly nested) data structure, accumulating
  column values into prev_col_values and appending completed rows to
  self.__data.

  Args:
    prev_col_values: A (values_dict, custom_properties) tuple; values_dict
                     maps column ids to the values collected so far.
    data: The remaining data for the columns from col_index onwards.
    col_index: Index of the column this level of data corresponds to.

  Raises:
    DataTableException: The data structure does not match the description.
  """
  # We first check that col_index has not exceeded the columns size
  if col_index >= len(self.__columns):
    raise DataTableException("The data does not match description, too deep")

  # Dealing with the scalar case, the data is the last value.
  if self.__columns[col_index]["container"] == "scalar":
    prev_col_values[0][self.__columns[col_index]["id"]] = data
    self.__data.append(prev_col_values)
    return

  if self.__columns[col_index]["container"] == "iter":
    if not hasattr(data, "__iter__") or isinstance(data, dict):
      raise DataTableException("Expected iterable object, got %s" %
                               type(data))
    # We only need to insert the rest of the columns
    # If there are less items than expected, we only add what there is.
    for value in data:
      if col_index >= len(self.__columns):
        raise DataTableException("Too many elements given in data")
      prev_col_values[0][self.__columns[col_index]["id"]] = value
      col_index += 1
    self.__data.append(prev_col_values)
    return

  # We know the current level is a dictionary, we verify the type.
  if not isinstance(data, dict):
    raise DataTableException("Expected dictionary at current level, got %s" %
                             type(data))
  # We check if this is the last level
  if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
    # We need to add the keys in the dictionary as they are
    for col in self.__columns[col_index:]:
      if col["id"] in data:
        prev_col_values[0][col["id"]] = data[col["id"]]
    self.__data.append(prev_col_values)
    return

  # We have a dictionary in an inner depth level.
  if not data.keys():
    # In case this is an empty dictionary, we add a record with the columns
    # filled only until this point.
    self.__data.append(prev_col_values)
  else:
    # Each key becomes this column's value; recurse into the mapped value
    # for the remaining columns, cloning the accumulated values per key.
    for key in sorted(data):
      col_values = dict(prev_col_values[0])
      col_values[self.__columns[col_index]["id"]] = key
      self._InnerAppendData((col_values, prev_col_values[1]),
                            data[key], col_index + 1)
def ToJSCode(self, name, columns_order=None, order_by=()):
  """Writes the data table as a JS code string.

  This method writes a string of JS code that can be run to
  generate a DataTable with the specified data. Typically used for debugging
  only.

  Args:
    name: The name of the table. The name would be used as the DataTable's
        variable name in the created JS code.
    columns_order: Optional. Specifies the order of columns in the
        output table. Specify a list of all column IDs in the order
        in which you want the table created.
        Note that you must list all column IDs in this parameter,
        if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
        Passed as is to _PreparedData.

  Returns:
    A string of JS code that, when run, generates a DataTable with the given
    name and the data stored in the DataTable object.

    Example result:
      "var tab1 = new google.visualization.DataTable();
       tab1.addColumn("string", "a", "a");
       tab1.addColumn("number", "b", "b");
       tab1.addColumn("boolean", "c", "c");
       tab1.addRows(10);
       tab1.setCell(0, 0, "a");
       tab1.setCell(0, 1, 1, null, {"foo": "bar"});
       tab1.setCell(0, 2, true);
       ...
       tab1.setCell(9, 0, "c");
       tab1.setCell(9, 1, 3, "3$");
       tab1.setCell(9, 2, false);"

  Raises:
    DataTableException: The data does not match the type.
  """
  encoder = DataTableJSONEncoder()

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # We first create the table with the given name
  jscode = "var %s = new google.visualization.DataTable();\n" % name
  if self.custom_properties:
    jscode += "%s.setTableProperties(%s);\n" % (
        name, encoder.encode(self.custom_properties))

  # We add the columns to the table
  for i, col in enumerate(columns_order):
    jscode += "%s.addColumn(%s, %s, %s);\n" % (
        name,
        encoder.encode(col_dict[col]["type"]),
        encoder.encode(col_dict[col]["label"]),
        encoder.encode(col_dict[col]["id"]))
    if col_dict[col]["custom_properties"]:
      jscode += "%s.setColumnProperties(%d, %s);\n" % (
          name, i, encoder.encode(col_dict[col]["custom_properties"]))
  jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

  # We now go over the data and add each row
  for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
    # We add all the elements of this row by their order
    for (j, col) in enumerate(columns_order):
      # Missing or None cells are simply left unset in the JS table.
      if col not in row or row[col] is None:
        continue
      value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        cell_cp = ""
        if len(value) == 3:
          cell_cp = ", %s" % encoder.encode(row[col][2])
        # We have a formatted value or custom property as well
        jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                   (name, i, j,
                    self.EscapeForJSCode(encoder, value[0]),
                    self.EscapeForJSCode(encoder, value[1]), cell_cp))
      else:
        jscode += "%s.setCell(%d, %d, %s);\n" % (
            name, i, j, self.EscapeForJSCode(encoder, value))
    if cp:
      jscode += "%s.setRowProperties(%d, %s);\n" % (
          name, i, encoder.encode(cp))
  return jscode
def ToHtml(self, columns_order=None, order_by=()):
  """Writes the data table as an HTML table code string.

  Args:
    columns_order: Optional. Specifies the order of columns in the
        output table. Specify a list of all column IDs in the order
        in which you want the table created.
        Note that you must list all column IDs in this parameter,
        if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
        Passed as is to _PreparedData.

  Returns:
    An HTML table code string.
    Example result (the result is without the newlines):
     <html><body><table border="1">
      <thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
      <tbody>
       <tr><td>1</td><td>"z"</td><td>2</td></tr>
       <tr><td>"3$"</td><td>"w"</td><td></td></tr>
      </tbody>
     </table></body></html>

  Raises:
    DataTableException: The data does not match the type.
  """
  table_template = "<html><body><table border=\"1\">%s</table></body></html>"
  columns_template = "<thead><tr>%s</tr></thead>"
  rows_template = "<tbody>%s</tbody>"
  row_template = "<tr>%s</tr>"
  header_cell_template = "<th>%s</th>"
  cell_template = "<td>%s</td>"

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Build the header row; labels are HTML-escaped.
  columns_list = []
  for col in columns_order:
    columns_list.append(header_cell_template %
                        html.escape(col_dict[col]["label"]))
  columns_html = columns_template % "".join(columns_list)

  rows_list = []
  # We now go over the data and add each row
  for row, unused_cp in self._PreparedData(order_by):
    cells_list = []
    # We add all the elements of this row by their order
    for col in columns_order:
      # For empty string we want empty quotes ("").
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # We have a formatted value and we're going to use it
        cells_list.append(cell_template % html.escape(self.ToString(value[1])))
      else:
        cells_list.append(cell_template % html.escape(self.ToString(value)))
    rows_list.append(row_template % "".join(cells_list))
  rows_html = rows_template % "".join(rows_list)

  return table_template % (columns_html + rows_html)
def ToCsv(self, columns_order=None, order_by=(), separator=","):
  """Writes the data table as a CSV string.

  Output is encoded in UTF-8 because the Python "csv" module can't handle
  Unicode properly according to its documentation.

  Args:
    columns_order: Optional. Specifies the order of columns in the
        output table. Specify a list of all column IDs in the order
        in which you want the table created.
        Note that you must list all column IDs in this parameter,
        if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
        Passed as is to _PreparedData.
    separator: Optional. The separator to use between the values.

  Returns:
    A CSV string representing the table.
    Example result:
     'a','b','c'
     1,'z',2
     3,'w',''

  Raises:
    DataTableException: The data does not match the type.
  """
  csv_buffer = six.StringIO()
  writer = csv.writer(csv_buffer, delimiter=separator)

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  def ensure_str(s):
    "Compatibility function. Ensures using of str rather than unicode."
    # NOTE(review): on Python 3 every str already passes the isinstance
    # check, so the encode branch is a Python 2 leftover -- verify before
    # relying on it for bytes input.
    if isinstance(s, str):
      return s
    return s.encode("utf-8")

  # Header row with the column labels.
  writer.writerow([ensure_str(col_dict[col]["label"])
                   for col in columns_order])

  # We now go over the data and add each row
  for row, unused_cp in self._PreparedData(order_by):
    cells_list = []
    # We add all the elements of this row by their order
    for col in columns_order:
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # We have a formatted value. Using it only for date/time types.
        if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
          cells_list.append(ensure_str(self.ToString(value[1])))
        else:
          cells_list.append(ensure_str(self.ToString(value[0])))
      else:
        cells_list.append(ensure_str(self.ToString(value)))
    writer.writerow(cells_list)
  return csv_buffer.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
  """Returns the table as a tab-separated file readable by MS Excel.

  Delegates to ToCsv() with a tab separator and re-encodes the result as
  little-endian UTF-16, the encoding Excel expects for TSV imports.

  Args:
    columns_order: Delegated to ToCsv.
    order_by: Delegated to ToCsv.

  Returns:
    A tab-separated, UTF-16LE encoded bytes object representing the table.
  """
  tsv = self.ToCsv(columns_order, order_by, separator="\t")
  # Normalize to text first: ToCsv may hand back bytes (UTF-8) on py2.
  if isinstance(tsv, six.text_type):
    text = tsv
  else:
    text = tsv.decode("utf-8")
  return text.encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
  """Returns an object suitable to be converted to JSON.

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
        you want them created in the output table. If specified,
        all column IDs must be present.
    order_by: Optional. Specifies the name of the column(s) to sort by.
        Passed as is to _PreparedData().

  Returns:
    A dictionary object for use by ToJSon or ToJSonResponse.
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Creating the column JSON objects
  col_objs = []
  for col_id in columns_order:
    col_obj = {"id": col_dict[col_id]["id"],
               "label": col_dict[col_id]["label"],
               "type": col_dict[col_id]["type"]}
    if col_dict[col_id]["custom_properties"]:
      col_obj["p"] = col_dict[col_id]["custom_properties"]
    col_objs.append(col_obj)

  # Creating the rows jsons
  row_objs = []
  for row, cp in self._PreparedData(order_by):
    cell_objs = []
    for col in columns_order:
      value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
      if value is None:
        cell_obj = None
      elif isinstance(value, tuple):
        # Wire format: "v" is the value, "f" the formatted value,
        # "p" the cell custom properties.
        cell_obj = {"v": value[0]}
        if len(value) > 1 and value[1] is not None:
          cell_obj["f"] = value[1]
        if len(value) == 3:
          cell_obj["p"] = value[2]
      else:
        cell_obj = {"v": value}
      cell_objs.append(cell_obj)
    row_obj = {"c": cell_objs}
    if cp:
      row_obj["p"] = cp
    row_objs.append(row_obj)

  json_obj = {"cols": col_objs, "rows": row_objs}
  if self.custom_properties:
    json_obj["p"] = self.custom_properties

  return json_obj
def ToJSon(self, columns_order=None, order_by=()):
  """Returns a string that can be used in a JS DataTable constructor.

  Produces a JSON string that can be passed directly into a Google
  Visualization API DataTable constructor, e.g.:

    google.setOnLoadCallback(drawTable);
    function drawTable() {
      var data = new google.visualization.DataTable(_my_JSon_string, 0.6);
      myTable.draw(data);
    }

  Args:
    columns_order: Optional. Specifies the order of columns in the
        output table; a list of all column IDs in the desired order.
        If given, every column ID must be listed.
    order_by: Optional. Name of the column(s) to sort by. Passed as is
        to _PreparedData().

  Returns:
    A JSon constructor string describing the table, e.g. (no newlines):
      {cols: [{id:"a",label:"a",type:"number"}, ...],
       rows: [{c:[{v:1},{v:"z"},{v:2}]}, ...],
       p: {'foo': 'bar'}}

  Raises:
    DataTableException: The data does not match the type.
  """
  json_obj = self._ToJSonObj(columns_order, order_by)
  encoded = DataTableJSONEncoder().encode(json_obj)
  # py2 compatibility: hand back UTF-8 bytes when the encoder produced
  # a non-str (unicode) result.
  if isinstance(encoded, str):
    return encoded
  return encoded.encode("utf-8")
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
  """Writes a table as a JSON response that can be returned as-is to a client.

  This method writes a JSON response to return to a client in response to a
  Google Visualization API query. This string can be processed by the calling
  page, and is used to deliver a data table to a visualization hosted on
  a different page.

  Args:
    columns_order: Optional. Passed straight to self.ToJSon().
    order_by: Optional. Passed straight to self.ToJSon().
    req_id: Optional. The response id, as retrieved by the request.
    response_handler: Optional. The response handler, as retrieved by the
        request.

  Returns:
    A JSON response string to be received by JS the visualization Query
    object. This response would be translated into a DataTable on the
    client side.
    Example result (newlines added for readability):
     google.visualization.Query.setResponse({
        'version':'0.6', 'reqId':'0', 'status':'OK',
        'table': {cols: [...], rows: [...]}});

  Note: The URL returning this string can be used as a data source by Google
  Visualization Gadgets or from JS code.
  """
  # Envelope per the Visualization wire protocol (version 0.6).
  response_obj = {
      "version": "0.6",
      "reqId": str(req_id),
      "table": self._ToJSonObj(columns_order, order_by),
      "status": "ok"
  }
  encoded_response_str = DataTableJSONEncoder().encode(response_obj)
  # py2 compatibility: ensure the payload is a byte string before
  # interpolating it into the handler call.
  if not isinstance(encoded_response_str, str):
    encoded_response_str = encoded_response_str.encode("utf-8")

  return "%s(%s);" % (response_handler, encoded_response_str)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
  """Writes the right response according to the request string passed in tqx.

  This method parses the tqx request string (format of which is defined in
  the documentation for implementing a data source of Google Visualization),
  and returns the right response according to the request.
  It parses out the "out" parameter of tqx, calls the relevant response
  (ToJSonResponse() for "json", ToCsv() for "csv", ToHtml() for "html",
  ToTsvExcel() for "tsv-excel") and passes the response function the rest of
  the relevant request keys.

  Args:
    columns_order: Optional. Passed as is to the relevant response function.
    order_by: Optional. Passed as is to the relevant response function.
    tqx: Optional. The request string as received by HTTP GET. Should be in
        the format "key1:value1;key2:value2...". All keys have a default
        value, so an empty string will just do the default (which is calling
        ToJSonResponse() with no extra parameters).

  Returns:
    A response string, as returned by the relevant response function.

  Raises:
    DataTableException: One of the parameters passed in tqx is not supported.
  """
  tqx_dict = {}
  if tqx:
    tqx_dict = dict(opt.split(":") for opt in tqx.split(";"))
  if tqx_dict.get("version", "0.6") != "0.6":
    raise DataTableException(
        "Version (%s) passed by request is not supported."
        % tqx_dict["version"])

  # "out" defaults to "json"; the elif branches below are only reached
  # when "out" was explicitly present, so direct indexing cannot KeyError.
  if tqx_dict.get("out", "json") == "json":
    response_handler = tqx_dict.get("responseHandler",
                                    "google.visualization.Query.setResponse")
    return self.ToJSonResponse(columns_order, order_by,
                               req_id=tqx_dict.get("reqId", 0),
                               response_handler=response_handler)
  elif tqx_dict["out"] == "html":
    return self.ToHtml(columns_order, order_by)
  elif tqx_dict["out"] == "csv":
    return self.ToCsv(columns_order, order_by)
  elif tqx_dict["out"] == "tsv-excel":
    return self.ToTsvExcel(columns_order, order_by)
  else:
    raise DataTableException(
        "'out' parameter: '%s' is not supported" % tqx_dict["out"])
|
google/google-visualization-python
|
gviz_api.py
|
DataTable.ToJSCode
|
python
|
def ToJSCode(self, name, columns_order=None, order_by=()):
  """Writes the data table as a JS code string.

  Builds JS statements that create a google.visualization.DataTable
  named `name`, add its columns, and set each cell (and any table, column,
  row, or cell custom properties). Typically used for debugging only.

  Args:
    name: JS variable name for the created DataTable.
    columns_order: Optional. List of all column IDs in the desired order.
    order_by: Optional. Column(s) to sort by; passed as is to _PreparedData.

  Returns:
    A string of JS code that, when run, generates the DataTable.

  Raises:
    DataTableException: The data does not match the type.
  """
  encoder = DataTableJSONEncoder()

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # We first create the table with the given name
  jscode = "var %s = new google.visualization.DataTable();\n" % name
  if self.custom_properties:
    jscode += "%s.setTableProperties(%s);\n" % (
        name, encoder.encode(self.custom_properties))

  # We add the columns to the table
  for i, col in enumerate(columns_order):
    jscode += "%s.addColumn(%s, %s, %s);\n" % (
        name,
        encoder.encode(col_dict[col]["type"]),
        encoder.encode(col_dict[col]["label"]),
        encoder.encode(col_dict[col]["id"]))
    if col_dict[col]["custom_properties"]:
      jscode += "%s.setColumnProperties(%d, %s);\n" % (
          name, i, encoder.encode(col_dict[col]["custom_properties"]))
  jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

  # We now go over the data and add each row
  for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
    # We add all the elements of this row by their order
    for (j, col) in enumerate(columns_order):
      # Missing or None cells are simply left unset.
      if col not in row or row[col] is None:
        continue
      value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        cell_cp = ""
        if len(value) == 3:
          cell_cp = ", %s" % encoder.encode(row[col][2])
        # We have a formatted value or custom property as well
        jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                   (name, i, j,
                    self.EscapeForJSCode(encoder, value[0]),
                    self.EscapeForJSCode(encoder, value[1]), cell_cp))
      else:
        jscode += "%s.setCell(%d, %d, %s);\n" % (
            name, i, j, self.EscapeForJSCode(encoder, value))
    if cp:
      jscode += "%s.setRowProperties(%d, %s);\n" % (
          name, i, encoder.encode(cp))
  return jscode
|
Writes the data table as a JS code string.
This method writes a string of JS code that can be run to
generate a DataTable with the specified data. Typically used for debugging
only.
Args:
name: The name of the table. The name would be used as the DataTable's
variable name in the created JS code.
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData.
Returns:
A string of JS code that, when run, generates a DataTable with the given
name and the data stored in the DataTable object.
Example result:
"var tab1 = new google.visualization.DataTable();
tab1.addColumn("string", "a", "a");
tab1.addColumn("number", "b", "b");
tab1.addColumn("boolean", "c", "c");
tab1.addRows(10);
tab1.setCell(0, 0, "a");
tab1.setCell(0, 1, 1, null, {"foo": "bar"});
tab1.setCell(0, 2, true);
...
tab1.setCell(9, 0, "c");
tab1.setCell(9, 1, 3, "3$");
tab1.setCell(9, 2, false);"
Raises:
DataTableException: The data does not match the type.
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L683-L768
|
[
"def _PreparedData(self, order_by=()):\n \"\"\"Prepares the data for enumeration - sorting it by order_by.\n\n Args:\n order_by: Optional. Specifies the name of the column(s) to sort by, and\n (optionally) which direction to sort in. Default sort direction\n is asc. Following formats are accepted:\n \"string_col_name\" -- For a single key in default (asc) order.\n (\"string_col_name\", \"asc|desc\") -- For a single key.\n [(\"col_1\",\"asc|desc\"), (\"col_2\",\"asc|desc\")] -- For more than\n one column, an array of tuples of (col_name, \"asc|desc\").\n\n Returns:\n The data sorted by the keys given.\n\n Raises:\n DataTableException: Sort direction not in 'asc' or 'desc'\n \"\"\"\n if not order_by:\n return self.__data\n\n sorted_data = self.__data[:]\n if isinstance(order_by, six.string_types) or (\n isinstance(order_by, tuple) and len(order_by) == 2 and\n order_by[1].lower() in [\"asc\", \"desc\"]):\n order_by = (order_by,)\n for key in reversed(order_by):\n if isinstance(key, six.string_types):\n sorted_data.sort(key=lambda x: x[0].get(key))\n elif (isinstance(key, (list, tuple)) and len(key) == 2 and\n key[1].lower() in (\"asc\", \"desc\")):\n key_func = lambda x: x[0].get(key[0])\n sorted_data.sort(key=key_func, reverse=key[1].lower() != \"asc\")\n else:\n raise DataTableException(\"Expected tuple with second value: \"\n \"'asc' or 'desc'\")\n\n return sorted_data\n"
] |
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']} )
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}} )
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
  """Initialize the data table from a table schema and (optionally) data.

  See the class documentation for more information on table schema and data
  values.

  Args:
    table_description: A table schema, following one of the formats described
        in TableDescriptionParser(). Schemas describe the
        column names, data types, and labels. See
        TableDescriptionParser() for acceptable formats.
    data: Optional. If given, fills the table with the given data. The data
        structure must be consistent with schema in table_description. See
        the class documentation for more information on acceptable data. You
        can add data later by calling AppendData().
    custom_properties: Optional. A dictionary from string to string that
        goes into the table's custom properties. This can be
        later changed by changing self.custom_properties.

  Raises:
    DataTableException: Raised if the data and the description did not match,
        or did not use the supported formats.
  """
  # Parsed, flattened column descriptions; fixed for the table's lifetime.
  self.__columns = self.TableDescriptionParser(table_description)
  # Rows as (values_dict, custom_properties) tuples, filled by LoadData.
  self.__data = []
  self.custom_properties = {}
  if custom_properties is not None:
    self.custom_properties = custom_properties
  if data:
    self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
  """Coerces a single value into the type expected for its column.

  Internal helper method.

  Args:
    value: The value which should be converted
    value_type: One of "string", "number", "boolean", "date", "datetime" or
        "timeofday".

  Returns:
    An item of the Python type appropriate to the given value_type. Strings
    are also converted to Unicode using UTF-8 encoding if necessary.

    If a tuple is given, it should be in one of the following forms:
      - (value, formatted value)
      - (value, formatted value, custom properties)
    where the formatted value is a string, and custom properties is a
    dictionary of the custom properties for this cell.
    To specify custom properties without specifying formatted value, one can
    pass None as the formatted value.
    One can also have a null-valued cell with formatted value and/or custom
    properties by specifying None for the value.
    This method ignores the custom properties except for checking that it is a
    dictionary. The custom properties are handled in the ToJSon and ToJSCode
    methods.

    The real type of the given value is not strictly checked. For example,
    any type can be used for string - as we simply take its str( ) and for
    boolean value we just check "if value".

  Examples:
    CoerceValue(None, "string") returns None
    CoerceValue((5, "5$"), "number") returns (5, "5$")
    CoerceValue(100, "string") returns "100"
    CoerceValue(0, "boolean") returns False

  Raises:
    DataTableException: The value and type did not match in a not-recoverable
        way, for example given value 'abc' for type 'number'.
  """
  if isinstance(value, tuple):
    # In case of a tuple, we run the same function on the value itself and
    # add the formatted value.
    if (len(value) not in [2, 3] or
        (len(value) == 3 and not isinstance(value[2], dict))):
      raise DataTableException("Wrong format for value and formatting - %s." %
                               str(value))
    if not isinstance(value[1], six.string_types + (type(None),)):
      raise DataTableException("Formatted value is not string, given %s." %
                               type(value[1]))
    js_value = DataTable.CoerceValue(value[0], value_type)
    # Re-attach the formatted value (and optional custom properties).
    return (js_value,) + value[1:]

  t_value = type(value)
  # None is a valid "null" cell for every column type.
  if value is None:
    return value
  if value_type == "boolean":
    return bool(value)

  elif value_type == "number":
    if isinstance(value, six.integer_types + (float,)):
      return value
    raise DataTableException("Wrong type %s when expected number" % t_value)

  elif value_type == "string":
    if isinstance(value, six.text_type):
      return value
    if isinstance(value, bytes):
      return six.text_type(value, encoding="utf-8")
    else:
      return six.text_type(value)

  elif value_type == "date":
    # A datetime is narrowed down to its date component.
    if isinstance(value, datetime.datetime):
      return datetime.date(value.year, value.month, value.day)
    elif isinstance(value, datetime.date):
      return value
    else:
      raise DataTableException("Wrong type %s when expected date" % t_value)

  elif value_type == "timeofday":
    # A datetime is narrowed down to its time component.
    if isinstance(value, datetime.datetime):
      return datetime.time(value.hour, value.minute, value.second)
    elif isinstance(value, datetime.time):
      return value
    else:
      raise DataTableException("Wrong type %s when expected time" % t_value)

  elif value_type == "datetime":
    if isinstance(value, datetime.datetime):
      return value
    else:
      raise DataTableException("Wrong type %s when expected datetime" %
                               t_value)
  # If we got here, it means the given value_type was not one of the
  # supported types.
  raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
if value is None:
return "(empty)"
elif isinstance(value, (datetime.datetime,
datetime.date,
datetime.time)):
return str(value)
elif isinstance(value, six.text_type):
return value
elif isinstance(value, bool):
return str(value).lower()
elif isinstance(value, bytes):
return six.text_type(value, encoding="utf-8")
else:
return six.text_type(value)
@staticmethod
def ColumnTypeParser(description):
  """Parses a single column description. Internal helper method.

  Args:
    description: a column description in the possible formats:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})

  Returns:
    Dictionary with the following keys: id, label, type, and
    custom_properties where:
      - If label not given, it equals the id.
      - If type not given, string is used by default.
      - If custom properties are not given, an empty dictionary is used by
        default.

  Raises:
    DataTableException: The column description did not match the RE, or
        unsupported type was passed.
  """
  if not description:
    raise DataTableException("Description error: empty description given")

  if not isinstance(description, (six.string_types, tuple)):
    raise DataTableException("Description error: expected either string or "
                             "tuple, got %s." % type(description))

  # Normalize the bare-string form to a 1-tuple.
  if isinstance(description, six.string_types):
    description = (description,)

  # According to the tuple's length, we fill the keys
  # We verify everything is of type string
  for elem in description[:3]:
    if not isinstance(elem, six.string_types):
      raise DataTableException("Description error: expected tuple of "
                               "strings, current element of type %s." %
                               type(elem))
  desc_dict = {"id": description[0],
               "label": description[0],
               "type": "string",
               "custom_properties": {}}
  if len(description) > 1:
    desc_dict["type"] = description[1].lower()
    if len(description) > 2:
      desc_dict["label"] = description[2]
      if len(description) > 3:
        if not isinstance(description[3], dict):
          raise DataTableException("Description error: expected custom "
                                   "properties of type dict, current element "
                                   "of type %s." % type(description[3]))
        desc_dict["custom_properties"] = description[3]
        if len(description) > 4:
          raise DataTableException("Description error: tuple of length > 4")
  if desc_dict["type"] not in ["string", "number", "boolean",
                               "date", "datetime", "timeofday"]:
    raise DataTableException(
        "Description error: unsupported type '%s'" % desc_dict["type"])
  return desc_dict
@staticmethod
def TableDescriptionParser(table_description, depth=0):
  """Parses the table_description object for internal use.

  Parses the user-submitted table description into an internal format used
  by the Python DataTable class. Returns the flat list of parsed columns.

  Args:
    table_description: A description of the table which should comply
                       with one of the formats described below.
    depth: Optional. The depth of the first level in the current description.
           Used by recursive calls to this function.

  Returns:
    List of columns, where each column represented by a dictionary with the
    keys: id, label, type, depth, container which means the following:
    - id: the id of the column
    - name: The name of the column
    - type: The datatype of the elements in this column. Allowed types are
            described in ColumnTypeParser().
    - depth: The depth of this column in the table description
    - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
    - custom_properties: The custom properties for this column.
    The returned description is flattened regardless of how it was given.

  Raises:
    DataTableException: Error in a column description or in the description
                        structure.

  Examples:
    A column description can be of the following forms:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
     or as a dictionary:
     'id': 'type'
     'id': ('type',)
     'id': ('type', 'label')
     'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
    If the type is not specified, we treat it as string.
    If no specific label is given, the label is simply the id.
    If no custom properties are given, we use an empty dictionary.

    input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
    output: [{'id': 'a', 'label': 'a', 'type': 'date',
              'depth': 0, 'container': 'iter', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'timeofday',
              'depth': 0, 'container': 'iter',
              'custom_properties': {'foo': 'bar'}}]

    input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
    output: [{'id': 'a', 'label': 'a', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'iter', 'custom_properties': {}},
             {'id': 'c', 'label': 'column c', 'type': 'string',
              'depth': 1, 'container': 'iter', 'custom_properties': {}}]

    input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'c', 'type': 'string',
              'depth': 1, 'container': 'dict', 'custom_properties': {}}]

    input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
    output: [{'id': 'w', 'label': 'word', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'count', 'type': 'number',
              'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

    input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
              'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
              'container': 'dict', 'custom_properties': {}}

    NOTE: there might be ambiguity in the case of a dictionary representation
    of a single column. For example, the following description can be parsed
    in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
    with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
    'a', and the other named 'b' of type 'c'. We choose the first option by
    default, and in case the second option is the right one, it is possible to
    make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
    into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
    -- second 'b' is the label, and {} is the custom properties field.
  """
  # For the recursion step, we check for a scalar object (string or tuple)
  if isinstance(table_description, (six.string_types, tuple)):
    parsed_col = DataTable.ColumnTypeParser(table_description)
    parsed_col["depth"] = depth
    parsed_col["container"] = "scalar"
    return [parsed_col]

  # Since it is not scalar, table_description must be iterable.
  if not hasattr(table_description, "__iter__"):
    raise DataTableException("Expected an iterable object, got %s" %
                             type(table_description))
  if not isinstance(table_description, dict):
    # We expects a non-dictionary iterable item. Each element is a full
    # column description at the current depth.
    columns = []
    for desc in table_description:
      parsed_col = DataTable.ColumnTypeParser(desc)
      parsed_col["depth"] = depth
      parsed_col["container"] = "iter"
      columns.append(parsed_col)
    if not columns:
      raise DataTableException("Description iterable objects should not"
                               " be empty.")
    return columns

  # The other case is a dictionary
  if not table_description:
    raise DataTableException("Empty dictionaries are not allowed inside"
                             " description")

  # To differentiate between the two cases of more levels below or this is
  # the most inner dictionary, we consider the number of keys (more then one
  # key is indication for most inner dictionary) and the type of the key and
  # value in case of only 1 key (if the type of key is string and the type of
  # the value is a tuple of 0-3 items, we assume this is the most inner
  # dictionary).
  # NOTE: this way of differentiating might create ambiguity. See docs.
  if (len(table_description) != 1 or
      (isinstance(next(six.iterkeys(table_description)), six.string_types) and
       isinstance(next(six.itervalues(table_description)), tuple) and
       len(next(six.itervalues(table_description))) < 4)):
    # This is the most inner dictionary. Parsing types.
    columns = []
    # We sort the items, equivalent to sort the keys since they are unique
    for key, value in sorted(table_description.items()):
      # We parse the column type as (key, type) or (key, type, label) using
      # ColumnTypeParser.
      if isinstance(value, tuple):
        parsed_col = DataTable.ColumnTypeParser((key,) + value)
      else:
        parsed_col = DataTable.ColumnTypeParser((key, value))
      parsed_col["depth"] = depth
      parsed_col["container"] = "dict"
      columns.append(parsed_col)
    return columns

  # This is an outer dictionary, must have at most one key. The single key
  # describes a column at this depth; its value is recursively flattened one
  # level deeper and appended after it.
  parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
  parsed_col["depth"] = depth
  parsed_col["container"] = "dict"
  return ([parsed_col] + DataTable.TableDescriptionParser(
      sorted(table_description.values())[0], depth=depth + 1))
@property
def columns(self):
  """Returns the parsed table description.

  This is the flat list of column dictionaries produced by
  TableDescriptionParser (keys: id, label, type, depth, container,
  custom_properties).
  """
  return self.__columns
def NumberOfRows(self):
  """Returns the number of rows in the current data stored in the table.

  Each appended record counts as one row regardless of the schema depth.
  """
  return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
  """Sets the custom properties for given row(s).

  Accepts either a single row index or an iterable of row indices and
  attaches the same custom-properties dictionary to each of those rows.

  Args:
    rows: The row, or rows, to set the custom properties for.
    custom_properties: A string to string dictionary of custom properties to
        set for all rows.
  """
  # Normalize a single index into a one-element list.
  row_indices = rows if hasattr(rows, "__iter__") else [rows]
  for index in row_indices:
    # A stored row is a (values_dict, custom_properties) pair; keep the
    # values and replace only the properties part.
    row_values = self.__data[index][0]
    self.__data[index] = (row_values, custom_properties)
def LoadData(self, data, custom_properties=None):
  """Loads new rows into the table, discarding any previously loaded rows.

  Optionally attaches the same custom-properties dictionary to every row
  that is added.

  Args:
    data: The rows that the table will contain.
    custom_properties: A dictionary of string to string to set as the custom
        properties for all rows.
  """
  # Reset the internal storage, then delegate the actual insertion.
  self.__data = []
  self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
  """Appends new rows to the table without clearing the existing ones.

  Data is appended in rows and must conform to the table schema passed in
  to __init__(). See CoerceValue() for a list of acceptable cell value
  types, and the class documentation for schema/data examples.

  Args:
    data: The row(s) to add to the table, in the table description format.
    custom_properties: A dictionary of string to string, representing the
        custom properties to add to all the rows.

  Raises:
    DataTableException: The data structure does not match the description.
  """
  if self.__columns[-1]["depth"]:
    # Nested schema: the recursive helper walks all levels itself.
    self._InnerAppendData(({}, custom_properties), data, 0)
  else:
    # Flat schema (maximal depth 0): every element of `data` is one row.
    # Each row gets its own fresh accumulator dict because the helper
    # mutates it in place.
    for single_row in data:
      self._InnerAppendData(({}, custom_properties), single_row, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
  """Inner function to assist LoadData.

  Recursively consumes one level of the (possibly nested) row structure,
  appending completed rows to the internal storage.

  Args:
    prev_col_values: A (values_dict, custom_properties) pair accumulating
        the cell values already collected at shallower depths.
        NOTE(review): the values dict is mutated in place by this call.
    data: The remaining (sub-)structure of the row being inserted.
    col_index: Index into self.__columns of the first column this call is
        responsible for filling.

  Raises:
    DataTableException: The data structure does not match the description.
  """
  # We first check that col_index has not exceeded the columns size
  if col_index >= len(self.__columns):
    raise DataTableException("The data does not match description, too deep")

  # Dealing with the scalar case, the data is the last value.
  if self.__columns[col_index]["container"] == "scalar":
    prev_col_values[0][self.__columns[col_index]["id"]] = data
    self.__data.append(prev_col_values)
    return

  if self.__columns[col_index]["container"] == "iter":
    if not hasattr(data, "__iter__") or isinstance(data, dict):
      raise DataTableException("Expected iterable object, got %s" %
                               type(data))
    # We only need to insert the rest of the columns
    # If there are less items than expected, we only add what there is.
    for value in data:
      if col_index >= len(self.__columns):
        raise DataTableException("Too many elements given in data")
      prev_col_values[0][self.__columns[col_index]["id"]] = value
      col_index += 1
    self.__data.append(prev_col_values)
    return

  # We know the current level is a dictionary, we verify the type.
  if not isinstance(data, dict):
    raise DataTableException("Expected dictionary at current level, got %s" %
                             type(data))
  # We check if this is the last level
  if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
    # We need to add the keys in the dictionary as they are
    for col in self.__columns[col_index:]:
      if col["id"] in data:
        prev_col_values[0][col["id"]] = data[col["id"]]
    self.__data.append(prev_col_values)
    return

  # We have a dictionary in an inner depth level.
  if not data.keys():
    # In case this is an empty dictionary, we add a record with the columns
    # filled only until this point.
    self.__data.append(prev_col_values)
  else:
    for key in sorted(data):
      # Each dictionary key becomes the value of the current column. The
      # accumulated values are copied so sibling keys don't share state.
      col_values = dict(prev_col_values[0])
      col_values[self.__columns[col_index]["id"]] = key
      self._InnerAppendData((col_values, prev_col_values[1]),
                            data[key], col_index + 1)
def _PreparedData(self, order_by=()):
  """Returns the table data, optionally sorted according to order_by.

  Args:
    order_by: Optional. The column key(s) to sort by, and (optionally) the
        sort direction; default direction is asc. Accepted formats:
          "string_col_name" -- single key in default (asc) order.
          ("string_col_name", "asc|desc") -- single key with direction.
          [("col_1", "asc|desc"), ("col_2", "asc|desc")] -- several keys.

  Returns:
    The data sorted by the keys given (the internal list itself when no
    sorting was requested, otherwise a sorted copy).

  Raises:
    DataTableException: Sort direction not in 'asc' or 'desc'
  """
  if not order_by:
    return self.__data

  # Normalize a lone key (string, or a (name, direction) pair) into a
  # one-element tuple of keys.
  if isinstance(order_by, six.string_types) or (
      isinstance(order_by, tuple) and len(order_by) == 2 and
      order_by[1].lower() in ["asc", "desc"]):
    order_by = (order_by,)

  result = self.__data[:]
  # Sorting by the least significant key first makes Python's stable sort
  # equivalent to one multi-key sort.
  for sort_key in reversed(order_by):
    if isinstance(sort_key, six.string_types):
      result.sort(key=lambda entry, k=sort_key: entry[0].get(k))
    elif (isinstance(sort_key, (list, tuple)) and len(sort_key) == 2 and
          sort_key[1].lower() in ("asc", "desc")):
      result.sort(key=lambda entry, k=sort_key[0]: entry[0].get(k),
                  reverse=sort_key[1].lower() != "asc")
    else:
      raise DataTableException("Expected tuple with second value: "
                               "'asc' or 'desc'")
  return result
def ToHtml(self, columns_order=None, order_by=()):
  """Renders the data table as an HTML table code string.

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
        the columns should appear in the output table. If used, every
        column ID must be listed.
    order_by: Optional. The column(s) to sort by; passed as is to
        _PreparedData.

  Returns:
    An HTML table code string, e.g. (without the newlines):
    <html><body><table border="1">
     <thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
     <tbody>
      <tr><td>1</td><td>"z"</td><td>2</td></tr>
      <tr><td>"3$"</td><td>"w"</td><td></td></tr>
     </tbody>
    </table></body></html>

  Raises:
    DataTableException: The data does not match the type.
  """
  page_tmpl = "<html><body><table border=\"1\">%s</table></body></html>"
  head_tmpl = "<thead><tr>%s</tr></thead>"
  body_tmpl = "<tbody>%s</tbody>"
  row_tmpl = "<tr>%s</tr>"
  th_tmpl = "<th>%s</th>"
  td_tmpl = "<td>%s</td>"

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Header row: one escaped <th> per column label.
  header_html = head_tmpl % "".join(
      th_tmpl % html.escape(col_dict[col_id]["label"])
      for col_id in columns_order)

  body_rows = []
  for row, unused_cp in self._PreparedData(order_by):
    cells = []
    for col_id in columns_order:
      # Missing or None cells render as an empty string ("").
      cell_value = ""
      if col_id in row and row[col_id] is not None:
        cell_value = self.CoerceValue(row[col_id], col_dict[col_id]["type"])
      if isinstance(cell_value, tuple):
        # A (value, formatted, ...) tuple: display the formatted value.
        cell_value = cell_value[1]
      cells.append(td_tmpl % html.escape(self.ToString(cell_value)))
    body_rows.append(row_tmpl % "".join(cells))
  body_html = body_tmpl % "".join(body_rows)

  return page_tmpl % (header_html + body_html)
def ToCsv(self, columns_order=None, order_by=(), separator=","):
  """Renders the data table as a CSV string.

  Output is encoded in UTF-8 because the Python "csv" module can't handle
  Unicode properly according to its documentation.

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
        the columns should appear in the output table. If used, every
        column ID must be listed.
    order_by: Optional. The column(s) to sort by; passed as is to
        _PreparedData.
    separator: Optional. The separator to use between the values.

  Returns:
    A CSV string representing the table, e.g.:
     'a','b','c'
     1,'z',2
     3,'w',''

  Raises:
    DataTableException: The data does not match the type.
  """
  output = six.StringIO()
  writer = csv.writer(output, delimiter=separator)

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  def _as_str(text):
    """Compatibility helper: returns str, UTF-8-encoding unicode if needed."""
    if isinstance(text, str):
      return text
    return text.encode("utf-8")

  # Header row with the column labels.
  writer.writerow([_as_str(col_dict[col_id]["label"])
                   for col_id in columns_order])

  for row, unused_cp in self._PreparedData(order_by):
    record = []
    for col_id in columns_order:
      # Missing or None cells render as an empty string ("").
      cell_value = ""
      if col_id in row and row[col_id] is not None:
        cell_value = self.CoerceValue(row[col_id], col_dict[col_id]["type"])
      if isinstance(cell_value, tuple):
        # A (value, formatted, ...) tuple: the formatted value is used only
        # for the date/time family; other types emit the raw value.
        if col_dict[col_id]["type"] in ["date", "datetime", "timeofday"]:
          record.append(_as_str(self.ToString(cell_value[1])))
        else:
          record.append(_as_str(self.ToString(cell_value[0])))
      else:
        record.append(_as_str(self.ToString(cell_value)))
    writer.writerow(record)
  return output.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
  """Returns the table as a tab-separated file readable by MS Excel.

  The result is little-endian UTF-16 encoded, with tabs separating the
  values.

  Args:
    columns_order: Delegated to ToCsv.
    order_by: Delegated to ToCsv.

  Returns:
    A tab-separated little endian UTF16 file representing the table.
  """
  tsv = self.ToCsv(columns_order, order_by, separator="\t")
  # ToCsv may hand back bytes (Python 2); normalize to text before
  # re-encoding as UTF-16LE.
  if isinstance(tsv, six.text_type):
    return tsv.encode("UTF-16LE")
  return tsv.decode("utf-8").encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
  """Builds a plain-Python object suitable to be converted to JSON.

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
        you want them created in the output table. If specified, all column
        IDs must be present.
    order_by: Optional. The column(s) to sort by; passed as is to
        _PreparedData().

  Returns:
    A dictionary with "cols" and "rows" keys (and "p" when the table has
    custom properties) for use by ToJSon or ToJSonResponse.
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Column descriptor objects.
  col_objs = []
  for col_id in columns_order:
    descriptor = {"id": col_dict[col_id]["id"],
                  "label": col_dict[col_id]["label"],
                  "type": col_dict[col_id]["type"]}
    if col_dict[col_id]["custom_properties"]:
      descriptor["p"] = col_dict[col_id]["custom_properties"]
    col_objs.append(descriptor)

  # Row objects: each cell is None, or a dict with "v" (value) and optional
  # "f" (formatted value) / "p" (custom properties).
  row_objs = []
  for row, row_cp in self._PreparedData(order_by):
    cell_objs = []
    for col_id in columns_order:
      coerced = self.CoerceValue(row.get(col_id, None),
                                 col_dict[col_id]["type"])
      if coerced is None:
        cell_objs.append(None)
        continue
      if isinstance(coerced, tuple):
        cell = {"v": coerced[0]}
        if len(coerced) > 1 and coerced[1] is not None:
          cell["f"] = coerced[1]
        if len(coerced) == 3:
          cell["p"] = coerced[2]
      else:
        cell = {"v": coerced}
      cell_objs.append(cell)
    row_obj = {"c": cell_objs}
    if row_cp:
      row_obj["p"] = row_cp
    row_objs.append(row_obj)

  json_obj = {"cols": col_objs, "rows": row_objs}
  if self.custom_properties:
    json_obj["p"] = self.custom_properties
  return json_obj
def ToJSon(self, columns_order=None, order_by=()):
  """Returns a string that can be used in a JS DataTable constructor.

  The output is a JSON string that can be passed directly into a Google
  Visualization API DataTable constructor on the page hosting the
  visualization, e.g.:

    google.setOnLoadCallback(drawTable);
    function drawTable() {
      var data = new google.visualization.DataTable(_my_JSon_string, 0.6);
      myTable.draw(data);
    }

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
        the columns should appear in the output table. If used, every
        column ID must be listed.
    order_by: Optional. The column(s) to sort by; passed as is to
        _PreparedData().

  Returns:
    A JSON constructor string to generate a JS DataTable with the data
    stored in the DataTable object, e.g. (without the newlines):
     {cols: [{id:"a",label:"a",type:"number"},
             {id:"b",label:"b",type:"string"},
             {id:"c",label:"c",type:"number"}],
      rows: [{c:[{v:1},{v:"z"},{v:2}]}, {c:[{v:3,f:"3$"},{v:"w"},null]}],
      p: {'foo': 'bar'}}

  Raises:
    DataTableException: The data does not match the type.
  """
  encoded = DataTableJSONEncoder().encode(
      self._ToJSonObj(columns_order, order_by))
  # On Python 2 the encoder may return unicode; callers expect UTF-8 bytes
  # there, while on Python 3 the str is returned untouched.
  if isinstance(encoded, str):
    return encoded
  return encoded.encode("utf-8")
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
  """Writes a table as a JSON response that can be returned as-is to a client.

  Produces the JSON response that answers a Google Visualization API query,
  for processing by the calling page; it delivers a data table to a
  visualization hosted on a different page.

  Args:
    columns_order: Optional. Passed straight to self.ToJSon().
    order_by: Optional. Passed straight to self.ToJSon().
    req_id: Optional. The response id, as retrieved by the request.
    response_handler: Optional. The response handler, as retrieved by the
        request.

  Returns:
    A JSON response string to be received by the JS visualization Query
    object, which translates it into a DataTable on the client side.
    Example result (newlines added for readability):

     google.visualization.Query.setResponse({
        'version':'0.6', 'reqId':'0', 'status':'OK',
        'table': {cols: [...], rows: [...]}});

  Note: The URL returning this string can be used as a data source by Google
  Visualization Gadgets or from JS code.
  """
  payload = DataTableJSONEncoder().encode({
      "version": "0.6",
      "reqId": str(req_id),
      "table": self._ToJSonObj(columns_order, order_by),
      "status": "ok"})
  # Normalize Python 2 unicode output to UTF-8 bytes; Python 3 str passes
  # through unchanged.
  if not isinstance(payload, str):
    payload = payload.encode("utf-8")
  return "%s(%s);" % (response_handler, payload)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
  """Writes the right response according to the request string passed in tqx.

  Parses the tqx request string (format defined by the documentation for
  implementing a Google Visualization data source), extracts its "out"
  parameter and dispatches to the matching serializer -- ToJSonResponse()
  for "json", ToCsv() for "csv", ToHtml() for "html", ToTsvExcel() for
  "tsv-excel" -- forwarding the remaining relevant request keys.

  Args:
    columns_order: Optional. Passed as is to the relevant response function.
    order_by: Optional. Passed as is to the relevant response function.
    tqx: Optional. The request string as received by HTTP GET, formatted as
        "key1:value1;key2:value2...". Every key has a default, so an empty
        string just produces the default ToJSonResponse() call.

  Returns:
    A response string, as returned by the relevant response function.

  Raises:
    DataTableException: One of the parameters passed in tqx is not supported.
  """
  tqx_dict = {}
  if tqx:
    tqx_dict = dict(opt.split(":") for opt in tqx.split(";"))
  if tqx_dict.get("version", "0.6") != "0.6":
    raise DataTableException(
        "Version (%s) passed by request is not supported."
        % tqx_dict["version"])

  out_format = tqx_dict.get("out", "json")
  if out_format == "json":
    response_handler = tqx_dict.get("responseHandler",
                                    "google.visualization.Query.setResponse")
    return self.ToJSonResponse(columns_order, order_by,
                               req_id=tqx_dict.get("reqId", 0),
                               response_handler=response_handler)
  if out_format == "html":
    return self.ToHtml(columns_order, order_by)
  if out_format == "csv":
    return self.ToCsv(columns_order, order_by)
  if out_format == "tsv-excel":
    return self.ToTsvExcel(columns_order, order_by)
  raise DataTableException(
      "'out' parameter: '%s' is not supported" % out_format)
|
google/google-visualization-python
|
gviz_api.py
|
DataTable.ToHtml
|
python
|
def ToHtml(self, columns_order=None, order_by=()):
table_template = "<html><body><table border=\"1\">%s</table></body></html>"
columns_template = "<thead><tr>%s</tr></thead>"
rows_template = "<tbody>%s</tbody>"
row_template = "<tr>%s</tr>"
header_cell_template = "<th>%s</th>"
cell_template = "<td>%s</td>"
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
columns_list = []
for col in columns_order:
columns_list.append(header_cell_template %
html.escape(col_dict[col]["label"]))
columns_html = columns_template % "".join(columns_list)
rows_list = []
# We now go over the data and add each row
for row, unused_cp in self._PreparedData(order_by):
cells_list = []
# We add all the elements of this row by their order
for col in columns_order:
# For empty string we want empty quotes ("").
value = ""
if col in row and row[col] is not None:
value = self.CoerceValue(row[col], col_dict[col]["type"])
if isinstance(value, tuple):
# We have a formatted value and we're going to use it
cells_list.append(cell_template % html.escape(self.ToString(value[1])))
else:
cells_list.append(cell_template % html.escape(self.ToString(value)))
rows_list.append(row_template % "".join(cells_list))
rows_html = rows_template % "".join(rows_list)
return table_template % (columns_html + rows_html)
|
Writes the data table as an HTML table code string.
Args:
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData.
Returns:
An HTML table code string.
Example result (the result is without the newlines):
<html><body><table border="1">
<thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
<tbody>
<tr><td>1</td><td>"z"</td><td>2</td></tr>
<tr><td>"3$"</td><td>"w"</td><td></td></tr>
</tbody>
</table></body></html>
Raises:
DataTableException: The data does not match the type.
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L770-L831
|
[
"def CoerceValue(value, value_type):\n \"\"\"Coerces a single value into the type expected for its column.\n\n Internal helper method.\n\n Args:\n value: The value which should be converted\n value_type: One of \"string\", \"number\", \"boolean\", \"date\", \"datetime\" or\n \"timeofday\".\n\n Returns:\n An item of the Python type appropriate to the given value_type. Strings\n are also converted to Unicode using UTF-8 encoding if necessary.\n If a tuple is given, it should be in one of the following forms:\n - (value, formatted value)\n - (value, formatted value, custom properties)\n where the formatted value is a string, and custom properties is a\n dictionary of the custom properties for this cell.\n To specify custom properties without specifying formatted value, one can\n pass None as the formatted value.\n One can also have a null-valued cell with formatted value and/or custom\n properties by specifying None for the value.\n This method ignores the custom properties except for checking that it is a\n dictionary. The custom properties are handled in the ToJSon and ToJSCode\n methods.\n The real type of the given value is not strictly checked. 
For example,\n any type can be used for string - as we simply take its str( ) and for\n boolean value we just check \"if value\".\n Examples:\n CoerceValue(None, \"string\") returns None\n CoerceValue((5, \"5$\"), \"number\") returns (5, \"5$\")\n CoerceValue(100, \"string\") returns \"100\"\n CoerceValue(0, \"boolean\") returns False\n\n Raises:\n DataTableException: The value and type did not match in a not-recoverable\n way, for example given value 'abc' for type 'number'.\n \"\"\"\n if isinstance(value, tuple):\n # In case of a tuple, we run the same function on the value itself and\n # add the formatted value.\n if (len(value) not in [2, 3] or\n (len(value) == 3 and not isinstance(value[2], dict))):\n raise DataTableException(\"Wrong format for value and formatting - %s.\" %\n str(value))\n if not isinstance(value[1], six.string_types + (type(None),)):\n raise DataTableException(\"Formatted value is not string, given %s.\" %\n type(value[1]))\n js_value = DataTable.CoerceValue(value[0], value_type)\n return (js_value,) + value[1:]\n\n t_value = type(value)\n if value is None:\n return value\n if value_type == \"boolean\":\n return bool(value)\n\n elif value_type == \"number\":\n if isinstance(value, six.integer_types + (float,)):\n return value\n raise DataTableException(\"Wrong type %s when expected number\" % t_value)\n\n elif value_type == \"string\":\n if isinstance(value, six.text_type):\n return value\n if isinstance(value, bytes):\n return six.text_type(value, encoding=\"utf-8\")\n else:\n return six.text_type(value)\n\n elif value_type == \"date\":\n if isinstance(value, datetime.datetime):\n return datetime.date(value.year, value.month, value.day)\n elif isinstance(value, datetime.date):\n return value\n else:\n raise DataTableException(\"Wrong type %s when expected date\" % t_value)\n\n elif value_type == \"timeofday\":\n if isinstance(value, datetime.datetime):\n return datetime.time(value.hour, value.minute, value.second)\n elif isinstance(value, 
datetime.time):\n return value\n else:\n raise DataTableException(\"Wrong type %s when expected time\" % t_value)\n\n elif value_type == \"datetime\":\n if isinstance(value, datetime.datetime):\n return value\n else:\n raise DataTableException(\"Wrong type %s when expected datetime\" %\n t_value)\n # If we got here, it means the given value_type was not one of the\n # supported types.\n raise DataTableException(\"Unsupported type %s\" % value_type)\n",
"def ToString(value):\n if value is None:\n return \"(empty)\"\n elif isinstance(value, (datetime.datetime,\n datetime.date,\n datetime.time)):\n return str(value)\n elif isinstance(value, six.text_type):\n return value\n elif isinstance(value, bool):\n return str(value).lower()\n elif isinstance(value, bytes):\n return six.text_type(value, encoding=\"utf-8\")\n else:\n return six.text_type(value)\n",
"def _PreparedData(self, order_by=()):\n \"\"\"Prepares the data for enumeration - sorting it by order_by.\n\n Args:\n order_by: Optional. Specifies the name of the column(s) to sort by, and\n (optionally) which direction to sort in. Default sort direction\n is asc. Following formats are accepted:\n \"string_col_name\" -- For a single key in default (asc) order.\n (\"string_col_name\", \"asc|desc\") -- For a single key.\n [(\"col_1\",\"asc|desc\"), (\"col_2\",\"asc|desc\")] -- For more than\n one column, an array of tuples of (col_name, \"asc|desc\").\n\n Returns:\n The data sorted by the keys given.\n\n Raises:\n DataTableException: Sort direction not in 'asc' or 'desc'\n \"\"\"\n if not order_by:\n return self.__data\n\n sorted_data = self.__data[:]\n if isinstance(order_by, six.string_types) or (\n isinstance(order_by, tuple) and len(order_by) == 2 and\n order_by[1].lower() in [\"asc\", \"desc\"]):\n order_by = (order_by,)\n for key in reversed(order_by):\n if isinstance(key, six.string_types):\n sorted_data.sort(key=lambda x: x[0].get(key))\n elif (isinstance(key, (list, tuple)) and len(key) == 2 and\n key[1].lower() in (\"asc\", \"desc\")):\n key_func = lambda x: x[0].get(key[0])\n sorted_data.sort(key=key_func, reverse=key[1].lower() != \"asc\")\n else:\n raise DataTableException(\"Expected tuple with second value: \"\n \"'asc' or 'desc'\")\n\n return sorted_data\n"
] |
class DataTable(object):
  """Wraps the data to convert to a Google Visualization API DataTable.

  Create this object, populate it with data, then call one of the ToJS...
  methods to return a string representation of the data in the format
  described.

  You can clear all data from the object to reuse it, but you cannot clear
  individual cells, rows, or columns. You also cannot modify the table schema
  specified in the class constructor.

  You can add new data one or more rows at a time. All data added to an
  instantiated DataTable must conform to the schema passed in to __init__().

  You can reorder the columns in the output table, and also specify row
  sorting order by column. The default column order is according to the
  original table_description parameter. Default row sort order is ascending,
  by column 1 values. For a dictionary, we sort the keys for order.

  The data and the table_description are closely tied, as described here:

  The table schema is defined in the class constructor's table_description
  parameter. The user defines each column using a tuple of
  (id[, type[, label[, custom_properties]]]). The default value for type is
  string, label is the same as ID if not specified, and custom properties is
  an empty dictionary if not specified.

  table_description is a dictionary or list, containing one or more column
  descriptor tuples, nested dictionaries, and lists. Each dictionary key,
  list element, or dictionary element must eventually be defined as
  a column description tuple. Here's an example of a dictionary where the key
  is a tuple, and the value is a list of two tuples:
    {('a', 'number'): [('b', 'number'), ('c', 'string')]}

  This flexibility in data entry enables you to build and manipulate your
  data in a Python structure that makes sense for your program.

  Add data to the table using the same nested design as the table's
  table_description, replacing column descriptor tuples with cell data, and
  each row is an element in the top level collection. This will be a bit
  clearer after you look at the following examples showing the
  table_description, matching data, and the resulting table:

  Columns as list of tuples [col1, col2, col3]
    table_description: [('a', 'number'), ('b', 'string')]
    AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
    Table:
    a  b   <--- these are column ids/labels
    1  z
    2  w
    4  o
    5  k

  Dictionary of columns, where key is a column, and value is a list of
  columns  {col1: [col2, col3]}
    table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
    AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
    Table:
    a  b  c
    1  2  z
    3  4  w

  Dictionary where key is a column, and the value is itself a dictionary of
  columns  {col1: {col2, col3}}
    table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
    AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
    Table:
    a  b  c
    1  2  z
    3  4  w
  """
def __init__(self, table_description, data=None, custom_properties=None):
  """Builds a DataTable from a schema and, optionally, initial data.

  See the class documentation for more information on table schema and data
  values.

  Args:
    table_description: A table schema in any of the formats accepted by
      TableDescriptionParser(), describing column ids, types and labels.
    data: Optional. Initial rows, structured consistently with the schema.
      Rows can also be added later by calling AppendData().
    custom_properties: Optional. A string-to-string dictionary stored as the
      table's custom properties; it can be changed later through
      self.custom_properties.

  Raises:
    DataTableException: The description (or the given data) did not match
      the supported formats.
  """
  # Parse the schema first so a bad description fails before any data is
  # examined.
  self.__columns = self.TableDescriptionParser(table_description)
  self.__data = []
  self.custom_properties = {} if custom_properties is None else custom_properties
  if data:
    self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
  """Coerces a single value into the type expected for its column.

  Internal helper method.

  Args:
    value: The value which should be converted
    value_type: One of "string", "number", "boolean", "date", "datetime" or
                "timeofday".

  Returns:
    An item of the Python type appropriate to the given value_type. Strings
    are also converted to Unicode using UTF-8 encoding if necessary.

    If a tuple is given, it should be in one of the following forms:
      - (value, formatted value)
      - (value, formatted value, custom properties)
    where the formatted value is a string, and custom properties is a
    dictionary of the custom properties for this cell.
    To specify custom properties without specifying formatted value, one can
    pass None as the formatted value.
    One can also have a null-valued cell with formatted value and/or custom
    properties by specifying None for the value.
    This method ignores the custom properties except for checking that it is
    a dictionary. The custom properties are handled in the ToJSon and
    ToJSCode methods.

    The real type of the given value is not strictly checked. For example,
    any type can be used for string - as we simply take its str( ) and for
    boolean value we just check "if value".

  Examples:
    CoerceValue(None, "string") returns None
    CoerceValue((5, "5$"), "number") returns (5, "5$")
    CoerceValue(100, "string") returns "100"
    CoerceValue(0, "boolean") returns False

  Raises:
    DataTableException: The value and type did not match in a
        not-recoverable way, for example given value 'abc' for type 'number'.
  """
  if isinstance(value, tuple):
    # In case of a tuple, we run the same function on the value itself and
    # add the formatted value.
    if (len(value) not in [2, 3] or
        (len(value) == 3 and not isinstance(value[2], dict))):
      raise DataTableException("Wrong format for value and formatting - %s." %
                               str(value))
    if not isinstance(value[1], six.string_types + (type(None),)):
      raise DataTableException("Formatted value is not string, given %s." %
                               type(value[1]))
    js_value = DataTable.CoerceValue(value[0], value_type)
    # Keep the formatted value (and custom properties, if any) untouched.
    return (js_value,) + value[1:]

  t_value = type(value)
  if value is None:
    # None (a null cell) is accepted for every column type.
    return value
  if value_type == "boolean":
    return bool(value)

  elif value_type == "number":
    if isinstance(value, six.integer_types + (float,)):
      return value
    raise DataTableException("Wrong type %s when expected number" % t_value)

  elif value_type == "string":
    if isinstance(value, six.text_type):
      return value
    if isinstance(value, bytes):
      # Bytes are assumed to be UTF-8 encoded text.
      return six.text_type(value, encoding="utf-8")
    else:
      return six.text_type(value)

  elif value_type == "date":
    if isinstance(value, datetime.datetime):
      # A datetime is narrowed down to its date part.
      return datetime.date(value.year, value.month, value.day)
    elif isinstance(value, datetime.date):
      return value
    else:
      raise DataTableException("Wrong type %s when expected date" % t_value)

  elif value_type == "timeofday":
    if isinstance(value, datetime.datetime):
      # A datetime is narrowed down to its time part.
      return datetime.time(value.hour, value.minute, value.second)
    elif isinstance(value, datetime.time):
      return value
    else:
      raise DataTableException("Wrong type %s when expected time" % t_value)

  elif value_type == "datetime":
    if isinstance(value, datetime.datetime):
      return value
    else:
      raise DataTableException("Wrong type %s when expected datetime" %
                               t_value)
  # If we got here, it means the given value_type was not one of the
  # supported types.
  raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
if value is None:
return "(empty)"
elif isinstance(value, (datetime.datetime,
datetime.date,
datetime.time)):
return str(value)
elif isinstance(value, six.text_type):
return value
elif isinstance(value, bool):
return str(value).lower()
elif isinstance(value, bytes):
return six.text_type(value, encoding="utf-8")
else:
return six.text_type(value)
@staticmethod
def ColumnTypeParser(description):
  """Parses a single column description. Internal helper method.

  Args:
    description: a column description in the possible formats:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
  Returns:
    Dictionary with the following keys: id, label, type, and
    custom_properties where:
      - If label not given, it equals the id.
      - If type not given, string is used by default.
      - If custom properties are not given, an empty dictionary is used by
        default.

  Raises:
    DataTableException: The column description did not match the RE, or
        unsupported type was passed.
  """
  if not description:
    raise DataTableException("Description error: empty description given")

  if not isinstance(description, (six.string_types, tuple)):
    raise DataTableException("Description error: expected either string or "
                             "tuple, got %s." % type(description))

  if isinstance(description, six.string_types):
    # A bare string is shorthand for a one-element ('id',) tuple.
    description = (description,)
  # According to the tuple's length, we fill the keys
  # We verify everything is of type string
  for elem in description[:3]:
    if not isinstance(elem, six.string_types):
      raise DataTableException("Description error: expected tuple of "
                               "strings, current element of type %s." %
                               type(elem))
  # Start from defaults, then override with whatever the tuple provides.
  desc_dict = {"id": description[0],
               "label": description[0],
               "type": "string",
               "custom_properties": {}}
  if len(description) > 1:
    desc_dict["type"] = description[1].lower()
  if len(description) > 2:
    desc_dict["label"] = description[2]
  if len(description) > 3:
    if not isinstance(description[3], dict):
      raise DataTableException("Description error: expected custom "
                               "properties of type dict, current element "
                               "of type %s." % type(description[3]))
    desc_dict["custom_properties"] = description[3]
  if len(description) > 4:
    raise DataTableException("Description error: tuple of length > 4")
  if desc_dict["type"] not in ["string", "number", "boolean",
                               "date", "datetime", "timeofday"]:
    raise DataTableException(
        "Description error: unsupported type '%s'" % desc_dict["type"])
  return desc_dict
@staticmethod
def TableDescriptionParser(table_description, depth=0):
  """Parses the table_description object for internal use.

  Parses the user-submitted table description into an internal format used
  by the Python DataTable class. Returns the flat list of parsed columns.

  Args:
    table_description: A description of the table which should comply
                       with one of the formats described below.
    depth: Optional. The depth of the first level in the current description.
           Used by recursive calls to this function.

  Returns:
    List of columns, where each column represented by a dictionary with the
    keys: id, label, type, depth, container which means the following:
    - id: the id of the column
    - name: The name of the column
    - type: The datatype of the elements in this column. Allowed types are
            described in ColumnTypeParser().
    - depth: The depth of this column in the table description
    - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
    - custom_properties: The custom properties for this column.
    The returned description is flattened regardless of how it was given.

  Raises:
    DataTableException: Error in a column description or in the description
                        structure.

  Examples:
    A column description can be of the following forms:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
     or as a dictionary:
     'id': 'type'
     'id': ('type',)
     'id': ('type', 'label')
     'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
    If the type is not specified, we treat it as string.
    If no specific label is given, the label is simply the id.
    If no custom properties are given, we use an empty dictionary.

    input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
    output: [{'id': 'a', 'label': 'a', 'type': 'date',
              'depth': 0, 'container': 'iter', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'timeofday',
              'depth': 0, 'container': 'iter',
              'custom_properties': {'foo': 'bar'}}]

    input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
    output: [{'id': 'a', 'label': 'a', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'iter', 'custom_properties': {}},
             {'id': 'c', 'label': 'column c', 'type': 'string',
              'depth': 1, 'container': 'iter', 'custom_properties': {}}]

    input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'c', 'type': 'string',
              'depth': 1, 'container': 'dict', 'custom_properties': {}}]

    input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
    output: [{'id': 'w', 'label': 'word', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'count', 'type': 'number',
              'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

    input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
              'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
              'container': 'dict', 'custom_properties': {}}

    NOTE: there might be ambiguity in the case of a dictionary representation
    of a single column. For example, the following description can be parsed
    in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
    with the id 'a', of type 'b' and the label 'c', or as 2 columns: one
    named 'a', and the other named 'b' of type 'c'. We choose the first
    option by default, and in case the second option is the right one, it is
    possible to make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add
    more info into the tuple, thus making it look like this:
    {'a': ('b', 'c', 'b', {})} -- second 'b' is the label, and {} is the
    custom properties field.
  """
  # For the recursion step, we check for a scalar object (string or tuple)
  if isinstance(table_description, (six.string_types, tuple)):
    parsed_col = DataTable.ColumnTypeParser(table_description)
    parsed_col["depth"] = depth
    parsed_col["container"] = "scalar"
    return [parsed_col]

  # Since it is not scalar, table_description must be iterable.
  if not hasattr(table_description, "__iter__"):
    raise DataTableException("Expected an iterable object, got %s" %
                             type(table_description))
  if not isinstance(table_description, dict):
    # We expect a non-dictionary iterable item.
    columns = []
    for desc in table_description:
      parsed_col = DataTable.ColumnTypeParser(desc)
      parsed_col["depth"] = depth
      parsed_col["container"] = "iter"
      columns.append(parsed_col)
    if not columns:
      raise DataTableException("Description iterable objects should not"
                               " be empty.")
    return columns
  # The other case is a dictionary
  if not table_description:
    raise DataTableException("Empty dictionaries are not allowed inside"
                             " description")

  # To differentiate between the two cases of more levels below or this is
  # the most inner dictionary, we consider the number of keys (more than one
  # key is indication for most inner dictionary) and the type of the key and
  # value in case of only 1 key (if the type of key is string and the type of
  # the value is a tuple of 0-3 items, we assume this is the most inner
  # dictionary).
  # NOTE: this way of differentiating might create ambiguity. See docs.
  if (len(table_description) != 1 or
      (isinstance(next(six.iterkeys(table_description)), six.string_types) and
       isinstance(next(six.itervalues(table_description)), tuple) and
       len(next(six.itervalues(table_description))) < 4)):
    # This is the most inner dictionary. Parsing types.
    columns = []
    # We sort the items, equivalent to sort the keys since they are unique
    for key, value in sorted(table_description.items()):
      # We parse the column type as (key, type) or (key, type, label) using
      # ColumnTypeParser.
      if isinstance(value, tuple):
        parsed_col = DataTable.ColumnTypeParser((key,) + value)
      else:
        parsed_col = DataTable.ColumnTypeParser((key, value))
      parsed_col["depth"] = depth
      parsed_col["container"] = "dict"
      columns.append(parsed_col)
    return columns
  # This is an outer dictionary, must have at most one key.
  parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
  parsed_col["depth"] = depth
  parsed_col["container"] = "dict"
  # Recurse into the single value; its columns sit one level deeper.
  return ([parsed_col] + DataTable.TableDescriptionParser(
      sorted(table_description.values())[0], depth=depth + 1))
@property
def columns(self):
"""Returns the parsed table description."""
return self.__columns
def NumberOfRows(self):
"""Returns the number of rows in the current data stored in the table."""
return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
"""Sets the custom properties for given row(s).
Can accept a single row or an iterable of rows.
Sets the given custom properties for all specified rows.
Args:
rows: The row, or rows, to set the custom properties for.
custom_properties: A string to string dictionary of custom properties to
set for all rows.
"""
if not hasattr(rows, "__iter__"):
rows = [rows]
for row in rows:
self.__data[row] = (self.__data[row][0], custom_properties)
def LoadData(self, data, custom_properties=None):
"""Loads new rows to the data table, clearing existing rows.
May also set the custom_properties for the added rows. The given custom
properties dictionary specifies the dictionary that will be used for *all*
given rows.
Args:
data: The rows that the table will contain.
custom_properties: A dictionary of string to string to set as the custom
properties for all rows.
"""
self.__data = []
self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
"""Appends new data to the table.
Data is appended in rows. Data must comply with
the table schema passed in to __init__(). See CoerceValue() for a list
of acceptable data types. See the class documentation for more information
and examples of schema and data values.
Args:
data: The row to add to the table. The data must conform to the table
description format.
custom_properties: A dictionary of string to string, representing the
custom properties to add to all the rows.
Raises:
DataTableException: The data structure does not match the description.
"""
# If the maximal depth is 0, we simply iterate over the data table
# lines and insert them using _InnerAppendData. Otherwise, we simply
# let the _InnerAppendData handle all the levels.
if not self.__columns[-1]["depth"]:
for row in data:
self._InnerAppendData(({}, custom_properties), row, 0)
else:
self._InnerAppendData(({}, custom_properties), data, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
  """Inner function to assist LoadData.

  Recursively fills one row (or several, for nested dict descriptions) by
  walking the remaining data against self.__columns[col_index:].

  Args:
    prev_col_values: A (values_dict, custom_properties) pair carrying the
      cell values accumulated from outer description levels.
    data: The data remaining for the columns from col_index onwards.
    col_index: Index into self.__columns of the next column to fill.

  Raises:
    DataTableException: The data structure does not match the description.
  """
  # We first check that col_index has not exceeded the columns size
  if col_index >= len(self.__columns):
    raise DataTableException("The data does not match description, too deep")

  # Dealing with the scalar case, the data is the last value.
  if self.__columns[col_index]["container"] == "scalar":
    prev_col_values[0][self.__columns[col_index]["id"]] = data
    self.__data.append(prev_col_values)
    return

  if self.__columns[col_index]["container"] == "iter":
    if not hasattr(data, "__iter__") or isinstance(data, dict):
      raise DataTableException("Expected iterable object, got %s" %
                               type(data))
    # We only need to insert the rest of the columns
    # If there are less items than expected, we only add what there is.
    for value in data:
      if col_index >= len(self.__columns):
        raise DataTableException("Too many elements given in data")
      prev_col_values[0][self.__columns[col_index]["id"]] = value
      col_index += 1
    self.__data.append(prev_col_values)
    return

  # We know the current level is a dictionary, we verify the type.
  if not isinstance(data, dict):
    raise DataTableException("Expected dictionary at current level, got %s" %
                             type(data))
  # We check if this is the last level
  if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
    # We need to add the keys in the dictionary as they are
    for col in self.__columns[col_index:]:
      if col["id"] in data:
        prev_col_values[0][col["id"]] = data[col["id"]]
    self.__data.append(prev_col_values)
    return

  # We have a dictionary in an inner depth level.
  if not data.keys():
    # In case this is an empty dictionary, we add a record with the columns
    # filled only until this point.
    self.__data.append(prev_col_values)
  else:
    # Recurse once per key; each key becomes the value of the current
    # column and fans out into its own row(s).
    for key in sorted(data):
      col_values = dict(prev_col_values[0])
      col_values[self.__columns[col_index]["id"]] = key
      self._InnerAppendData((col_values, prev_col_values[1]),
                            data[key], col_index + 1)
def _PreparedData(self, order_by=()):
  """Prepares the data for enumeration - sorting it by order_by.

  Args:
    order_by: Optional. Specifies the name of the column(s) to sort by, and
              (optionally) which direction to sort in. Default sort direction
              is asc. Following formats are accepted:
              "string_col_name" -- For a single key in default (asc) order.
              ("string_col_name", "asc|desc") -- For a single key.
              [("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
              one column, an array of tuples of (col_name, "asc|desc").

  Returns:
    The data sorted by the keys given.

  Raises:
    DataTableException: Sort direction not in 'asc' or 'desc'
  """
  if not order_by:
    # No sorting requested; the internal row list is returned as-is.
    return self.__data

  sorted_data = self.__data[:]
  # Normalize a single key -- either "col" or ("col", "asc|desc") -- into a
  # one-element sequence of keys.
  if isinstance(order_by, six.string_types) or (
      isinstance(order_by, tuple) and len(order_by) == 2 and
      order_by[1].lower() in ["asc", "desc"]):
    order_by = (order_by,)
  # Sort by each key from last to first; since list.sort() is stable this
  # yields a multi-key sort with the first key as the primary order.
  for key in reversed(order_by):
    if isinstance(key, six.string_types):
      sorted_data.sort(key=lambda x: x[0].get(key))
    elif (isinstance(key, (list, tuple)) and len(key) == 2 and
          key[1].lower() in ("asc", "desc")):
      key_func = lambda x: x[0].get(key[0])
      sorted_data.sort(key=key_func, reverse=key[1].lower() != "asc")
    else:
      raise DataTableException("Expected tuple with second value: "
                               "'asc' or 'desc'")

  return sorted_data
def ToJSCode(self, name, columns_order=None, order_by=()):
  """Writes the data table as a JS code string.

  This method writes a string of JS code that can be run to
  generate a DataTable with the specified data. Typically used for debugging
  only.

  Args:
    name: The name of the table. The name would be used as the DataTable's
          variable name in the created JS code.
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the
                   order in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.

  Returns:
    A string of JS code that, when run, generates a DataTable with the given
    name and the data stored in the DataTable object.

    Example result:
      "var tab1 = new google.visualization.DataTable();
       tab1.addColumn("string", "a", "a");
       tab1.addColumn("number", "b", "b");
       tab1.addColumn("boolean", "c", "c");
       tab1.addRows(10);
       tab1.setCell(0, 0, "a");
       tab1.setCell(0, 1, 1, null, {"foo": "bar"});
       tab1.setCell(0, 2, true);
       ...
       tab1.setCell(9, 0, "c");
       tab1.setCell(9, 1, 3, "3$");
       tab1.setCell(9, 2, false);"

  Raises:
    DataTableException: The data does not match the type.
  """
  encoder = DataTableJSONEncoder()

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # We first create the table with the given name
  jscode = "var %s = new google.visualization.DataTable();\n" % name
  if self.custom_properties:
    jscode += "%s.setTableProperties(%s);\n" % (
        name, encoder.encode(self.custom_properties))

  # We add the columns to the table
  for i, col in enumerate(columns_order):
    jscode += "%s.addColumn(%s, %s, %s);\n" % (
        name,
        encoder.encode(col_dict[col]["type"]),
        encoder.encode(col_dict[col]["label"]),
        encoder.encode(col_dict[col]["id"]))
    if col_dict[col]["custom_properties"]:
      jscode += "%s.setColumnProperties(%d, %s);\n" % (
          name, i, encoder.encode(col_dict[col]["custom_properties"]))
  jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

  # We now go over the data and add each row
  for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
    # We add all the elements of this row by their order
    for (j, col) in enumerate(columns_order):
      # Missing or null cells are simply left unset in the JS table.
      if col not in row or row[col] is None:
        continue
      value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        cell_cp = ""
        if len(value) == 3:
          cell_cp = ", %s" % encoder.encode(row[col][2])
        # We have a formatted value or custom property as well
        jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                   (name, i, j,
                    self.EscapeForJSCode(encoder, value[0]),
                    self.EscapeForJSCode(encoder, value[1]), cell_cp))
      else:
        jscode += "%s.setCell(%d, %d, %s);\n" % (
            name, i, j, self.EscapeForJSCode(encoder, value))
    if cp:
      jscode += "%s.setRowProperties(%d, %s);\n" % (
          name, i, encoder.encode(cp))
  return jscode
def ToCsv(self, columns_order=None, order_by=(), separator=","):
  """Writes the data table as a CSV string.

  Output is encoded in UTF-8 because the Python "csv" module can't handle
  Unicode properly according to its documentation.

  Args:
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the
                   order in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.
    separator: Optional. The separator to use between the values.

  Returns:
    A CSV string representing the table.
    Example result:
     'a','b','c'
     1,'z',2
     3,'w',''

  Raises:
    DataTableException: The data does not match the type.
  """
  csv_buffer = six.StringIO()
  writer = csv.writer(csv_buffer, delimiter=separator)

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  def ensure_str(s):
    "Compatibility function. Ensures using of str rather than unicode."
    if isinstance(s, str):
      return s
    return s.encode("utf-8")

  # Header row first: one label per column, in the requested order.
  writer.writerow([ensure_str(col_dict[col]["label"])
                   for col in columns_order])

  # We now go over the data and add each row
  for row, unused_cp in self._PreparedData(order_by):
    cells_list = []
    # We add all the elements of this row by their order
    for col in columns_order:
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # We have a formatted value. Using it only for date/time types.
        if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
          cells_list.append(ensure_str(self.ToString(value[1])))
        else:
          cells_list.append(ensure_str(self.ToString(value[0])))
      else:
        cells_list.append(ensure_str(self.ToString(value)))
    writer.writerow(cells_list)
  return csv_buffer.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
  """Returns the table as a tab-separated file readable by MS Excel.

  The result is encoded as UTF-16 little endian, with tabs separating the
  values.

  Args:
    columns_order: Delegated to ToCsv.
    order_by: Delegated to ToCsv.

  Returns:
    A tab-separated little endian UTF16 file representing the table.
  """
  tab_separated = self.ToCsv(columns_order, order_by, separator="\t")
  if not isinstance(tab_separated, six.text_type):
    # Python 2 may hand back UTF-8 bytes; normalize to text first.
    tab_separated = tab_separated.decode("utf-8")
  return tab_separated.encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
  """Returns an object suitable to be converted to JSON.

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
                   you want them created in the output table. If specified,
                   all column IDs must be present.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData().

  Returns:
    A dictionary object for use by ToJSon or ToJSonResponse.
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Creating the column JSON objects
  col_objs = []
  for col_id in columns_order:
    col_obj = {"id": col_dict[col_id]["id"],
               "label": col_dict[col_id]["label"],
               "type": col_dict[col_id]["type"]}
    if col_dict[col_id]["custom_properties"]:
      col_obj["p"] = col_dict[col_id]["custom_properties"]
    col_objs.append(col_obj)

  # Creating the rows jsons
  row_objs = []
  for row, cp in self._PreparedData(order_by):
    cell_objs = []
    for col in columns_order:
      value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
      if value is None:
        cell_obj = None
      elif isinstance(value, tuple):
        # Tuple cells carry an optional formatted value ("f") and optional
        # custom properties ("p") alongside the raw value ("v").
        cell_obj = {"v": value[0]}
        if len(value) > 1 and value[1] is not None:
          cell_obj["f"] = value[1]
        if len(value) == 3:
          cell_obj["p"] = value[2]
      else:
        cell_obj = {"v": value}
      cell_objs.append(cell_obj)
    row_obj = {"c": cell_objs}
    if cp:
      row_obj["p"] = cp
    row_objs.append(row_obj)

  json_obj = {"cols": col_objs, "rows": row_objs}
  if self.custom_properties:
    json_obj["p"] = self.custom_properties

  return json_obj
def ToJSon(self, columns_order=None, order_by=()):
  """Returns a JSON string usable by the JS DataTable constructor.

  Use this output if you are hosting the visualization HTML on your site
  and want to code the data table in Python: pass the returned string
  straight into google.visualization.DataTable, e.g.:

    var data = new google.visualization.DataTable(my_json_string, 0.6);

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
      you want the columns in the output table; if given, every column ID
      must be listed.
    order_by: Optional. Column sort specification, passed as is to
      _PreparedData().

  Returns:
    A JSON string describing the columns, rows and custom properties of the
    table, e.g. (without newlines):
      {cols: [{id:"a",label:"a",type:"number"}, ...],
       rows: [{c:[{v:1},{v:"z"}]}, ...], p: {'foo': 'bar'}}

  Raises:
    DataTableException: The data does not match the type.
  """
  json_str = DataTableJSONEncoder().encode(
      self._ToJSonObj(columns_order, order_by))
  if isinstance(json_str, str):
    return json_str
  # Python 2 may produce a unicode object; hand back UTF-8 bytes instead.
  return json_str.encode("utf-8")
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
  """Writes a table as a JSON response that can be returned as-is to a client.

  The returned string is a complete Google Visualization API query response:
  the JS visualization Query object on the client side translates it back
  into a DataTable. The URL serving this string can therefore be used as a
  data source by Google Visualization gadgets or JS code.

  Args:
    columns_order: Optional. Passed straight to self._ToJSonObj().
    order_by: Optional. Passed straight to self._ToJSonObj().
    req_id: Optional. The response id, as retrieved by the request.
    response_handler: Optional. The response handler, as retrieved by the
      request.

  Returns:
    A string of the form (newlines added for readability):
      google.visualization.Query.setResponse({
          'version':'0.6', 'reqId':'0', 'status':'OK',
          'table': {cols: [...], rows: [...]}});
  """
  response_obj = {
      "version": "0.6",
      "reqId": str(req_id),
      "table": self._ToJSonObj(columns_order, order_by),
      "status": "ok",
  }
  encoded = DataTableJSONEncoder().encode(response_obj)
  if not isinstance(encoded, str):
    encoded = encoded.encode("utf-8")
  return "%s(%s);" % (response_handler, encoded)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
  """Writes the right response according to the request string passed in tqx.

  Parses the tqx request string (format defined by the Google
  Visualization data source protocol) and dispatches to the matching
  serializer: ToJSonResponse() for "json" (the default), ToHtml() for
  "html", ToCsv() for "csv" and ToTsvExcel() for "tsv-excel". The other
  relevant tqx keys (reqId, responseHandler) are forwarded along.

  Args:
    columns_order: Optional. Passed as is to the chosen serializer.
    order_by: Optional. Passed as is to the chosen serializer.
    tqx: Optional. The request string as received by HTTP GET, in the
        format "key1:value1;key2:value2...". Every key has a default, so
        an empty string simply produces a plain JSON response.

  Returns:
    A response string, as returned by the chosen serializer.

  Raises:
    DataTableException: One of the parameters passed in tqx is not
        supported.
  """
  options = {}
  if tqx:
    options = dict(pair.split(":") for pair in tqx.split(";"))
  if options.get("version", "0.6") != "0.6":
    raise DataTableException(
        "Version (%s) passed by request is not supported."
        % options["version"])
  out = options.get("out", "json")
  if out == "json":
    handler = options.get("responseHandler",
                          "google.visualization.Query.setResponse")
    return self.ToJSonResponse(columns_order, order_by,
                               req_id=options.get("reqId", 0),
                               response_handler=handler)
  if out == "html":
    return self.ToHtml(columns_order, order_by)
  if out == "csv":
    return self.ToCsv(columns_order, order_by)
  if out == "tsv-excel":
    return self.ToTsvExcel(columns_order, order_by)
  raise DataTableException(
      "'out' parameter: '%s' is not supported" % out)
|
google/google-visualization-python
|
gviz_api.py
|
DataTable.ToCsv
|
python
|
def ToCsv(self, columns_order=None, order_by=(), separator=","):
  """Writes the data table as a CSV string.

  Values are UTF-8 encoded where needed because the Python 2 "csv"
  module cannot handle Unicode text directly.

  Args:
    columns_order: Optional. A list of all column IDs giving the output
        column order; when used it must mention every column.
    order_by: Optional. Column name(s) to sort by; forwarded unchanged
        to _PreparedData.
    separator: Optional. The delimiter placed between values.

  Returns:
    A CSV string representing the table.

  Raises:
    DataTableException: The data does not match the type.
  """
  buf = six.StringIO()
  csv_writer = csv.writer(buf, delimiter=separator)
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  def as_str(text):
    "Compatibility helper. Ensures using of str rather than unicode."
    if isinstance(text, str):
      return text
    return text.encode("utf-8")

  # Header row: the column labels, in the requested order.
  csv_writer.writerow([as_str(col_dict[col]["label"])
                       for col in columns_order])
  # One CSV row per prepared (possibly sorted) table row.
  for row, unused_cp in self._PreparedData(order_by):
    cells = []
    for col in columns_order:
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # A (value, formatted) pair: the formatted text is only used
        # for date/time types.
        if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
          cells.append(as_str(self.ToString(value[1])))
        else:
          cells.append(as_str(self.ToString(value[0])))
      else:
        cells.append(as_str(self.ToString(value)))
    csv_writer.writerow(cells)
  return buf.getvalue()
|
Writes the data table as a CSV string.
Output is encoded in UTF-8 because the Python "csv" module can't handle
Unicode properly according to its documentation.
Args:
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData.
separator: Optional. The separator to use between the values.
Returns:
A CSV string representing the table.
Example result:
'a','b','c'
1,'z',2
3,'w',''
Raises:
DataTableException: The data does not match the type.
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L833-L893
|
[
"def CoerceValue(value, value_type):\n \"\"\"Coerces a single value into the type expected for its column.\n\n Internal helper method.\n\n Args:\n value: The value which should be converted\n value_type: One of \"string\", \"number\", \"boolean\", \"date\", \"datetime\" or\n \"timeofday\".\n\n Returns:\n An item of the Python type appropriate to the given value_type. Strings\n are also converted to Unicode using UTF-8 encoding if necessary.\n If a tuple is given, it should be in one of the following forms:\n - (value, formatted value)\n - (value, formatted value, custom properties)\n where the formatted value is a string, and custom properties is a\n dictionary of the custom properties for this cell.\n To specify custom properties without specifying formatted value, one can\n pass None as the formatted value.\n One can also have a null-valued cell with formatted value and/or custom\n properties by specifying None for the value.\n This method ignores the custom properties except for checking that it is a\n dictionary. The custom properties are handled in the ToJSon and ToJSCode\n methods.\n The real type of the given value is not strictly checked. 
For example,\n any type can be used for string - as we simply take its str( ) and for\n boolean value we just check \"if value\".\n Examples:\n CoerceValue(None, \"string\") returns None\n CoerceValue((5, \"5$\"), \"number\") returns (5, \"5$\")\n CoerceValue(100, \"string\") returns \"100\"\n CoerceValue(0, \"boolean\") returns False\n\n Raises:\n DataTableException: The value and type did not match in a not-recoverable\n way, for example given value 'abc' for type 'number'.\n \"\"\"\n if isinstance(value, tuple):\n # In case of a tuple, we run the same function on the value itself and\n # add the formatted value.\n if (len(value) not in [2, 3] or\n (len(value) == 3 and not isinstance(value[2], dict))):\n raise DataTableException(\"Wrong format for value and formatting - %s.\" %\n str(value))\n if not isinstance(value[1], six.string_types + (type(None),)):\n raise DataTableException(\"Formatted value is not string, given %s.\" %\n type(value[1]))\n js_value = DataTable.CoerceValue(value[0], value_type)\n return (js_value,) + value[1:]\n\n t_value = type(value)\n if value is None:\n return value\n if value_type == \"boolean\":\n return bool(value)\n\n elif value_type == \"number\":\n if isinstance(value, six.integer_types + (float,)):\n return value\n raise DataTableException(\"Wrong type %s when expected number\" % t_value)\n\n elif value_type == \"string\":\n if isinstance(value, six.text_type):\n return value\n if isinstance(value, bytes):\n return six.text_type(value, encoding=\"utf-8\")\n else:\n return six.text_type(value)\n\n elif value_type == \"date\":\n if isinstance(value, datetime.datetime):\n return datetime.date(value.year, value.month, value.day)\n elif isinstance(value, datetime.date):\n return value\n else:\n raise DataTableException(\"Wrong type %s when expected date\" % t_value)\n\n elif value_type == \"timeofday\":\n if isinstance(value, datetime.datetime):\n return datetime.time(value.hour, value.minute, value.second)\n elif isinstance(value, 
datetime.time):\n return value\n else:\n raise DataTableException(\"Wrong type %s when expected time\" % t_value)\n\n elif value_type == \"datetime\":\n if isinstance(value, datetime.datetime):\n return value\n else:\n raise DataTableException(\"Wrong type %s when expected datetime\" %\n t_value)\n # If we got here, it means the given value_type was not one of the\n # supported types.\n raise DataTableException(\"Unsupported type %s\" % value_type)\n",
"def ToString(value):\n if value is None:\n return \"(empty)\"\n elif isinstance(value, (datetime.datetime,\n datetime.date,\n datetime.time)):\n return str(value)\n elif isinstance(value, six.text_type):\n return value\n elif isinstance(value, bool):\n return str(value).lower()\n elif isinstance(value, bytes):\n return six.text_type(value, encoding=\"utf-8\")\n else:\n return six.text_type(value)\n",
"def _PreparedData(self, order_by=()):\n \"\"\"Prepares the data for enumeration - sorting it by order_by.\n\n Args:\n order_by: Optional. Specifies the name of the column(s) to sort by, and\n (optionally) which direction to sort in. Default sort direction\n is asc. Following formats are accepted:\n \"string_col_name\" -- For a single key in default (asc) order.\n (\"string_col_name\", \"asc|desc\") -- For a single key.\n [(\"col_1\",\"asc|desc\"), (\"col_2\",\"asc|desc\")] -- For more than\n one column, an array of tuples of (col_name, \"asc|desc\").\n\n Returns:\n The data sorted by the keys given.\n\n Raises:\n DataTableException: Sort direction not in 'asc' or 'desc'\n \"\"\"\n if not order_by:\n return self.__data\n\n sorted_data = self.__data[:]\n if isinstance(order_by, six.string_types) or (\n isinstance(order_by, tuple) and len(order_by) == 2 and\n order_by[1].lower() in [\"asc\", \"desc\"]):\n order_by = (order_by,)\n for key in reversed(order_by):\n if isinstance(key, six.string_types):\n sorted_data.sort(key=lambda x: x[0].get(key))\n elif (isinstance(key, (list, tuple)) and len(key) == 2 and\n key[1].lower() in (\"asc\", \"desc\")):\n key_func = lambda x: x[0].get(key[0])\n sorted_data.sort(key=key_func, reverse=key[1].lower() != \"asc\")\n else:\n raise DataTableException(\"Expected tuple with second value: \"\n \"'asc' or 'desc'\")\n\n return sorted_data\n",
"def ensure_str(s):\n \"Compatibility function. Ensures using of str rather than unicode.\"\n if isinstance(s, str):\n return s\n return s.encode(\"utf-8\")\n"
] |
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
  """Builds a DataTable from a schema and, optionally, initial rows.

  See the class documentation for more information on table schema and
  data values.

  Args:
    table_description: A table schema in any of the formats accepted by
        TableDescriptionParser(); defines column ids, types and labels.
    data: Optional. Initial rows, structured consistently with the
        schema. Equivalent to calling AppendData() afterwards.
    custom_properties: Optional. A string-to-string dictionary stored as
        the table-level custom properties; it can be changed later via
        self.custom_properties.

  Raises:
    DataTableException: Raised if the data and the description did not
        match, or did not use the supported formats.
  """
  self.__columns = self.TableDescriptionParser(table_description)
  self.__data = []
  self.custom_properties = (
      {} if custom_properties is None else custom_properties)
  if data:
    self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
  """Coerces a single value into the type expected for its column.

  Internal helper method.

  Args:
    value: The value which should be converted
    value_type: One of "string", "number", "boolean", "date", "datetime" or
        "timeofday".

  Returns:
    An item of the Python type appropriate to the given value_type. Strings
    are also converted to Unicode using UTF-8 encoding if necessary.
    If a tuple is given, it should be in one of the following forms:
      - (value, formatted value)
      - (value, formatted value, custom properties)
    where the formatted value is a string, and custom properties is a
    dictionary of the custom properties for this cell.
    To specify custom properties without specifying formatted value, one can
    pass None as the formatted value.
    One can also have a null-valued cell with formatted value and/or custom
    properties by specifying None for the value.
    This method ignores the custom properties except for checking that it is
    a dictionary. The custom properties are handled in the ToJSon and
    ToJSCode methods.
    The real type of the given value is not strictly checked. For example,
    any type can be used for string - as we simply take its str( ) and for
    boolean value we just check "if value".

  Examples:
    CoerceValue(None, "string") returns None
    CoerceValue((5, "5$"), "number") returns (5, "5$")
    CoerceValue(100, "string") returns "100"
    CoerceValue(0, "boolean") returns False

  Raises:
    DataTableException: The value and type did not match in a
        not-recoverable way, for example given value 'abc' for type
        'number'.
  """
  if isinstance(value, tuple):
    # In case of a tuple, we run the same function on the value itself and
    # add the formatted value.
    if (len(value) not in [2, 3] or
        (len(value) == 3 and not isinstance(value[2], dict))):
      raise DataTableException("Wrong format for value and formatting - %s." %
                               str(value))
    if not isinstance(value[1], six.string_types + (type(None),)):
      raise DataTableException("Formatted value is not string, given %s." %
                               type(value[1]))
    js_value = DataTable.CoerceValue(value[0], value_type)
    return (js_value,) + value[1:]

  t_value = type(value)
  if value is None:
    # None is a legal "null cell" regardless of the declared column type.
    return value
  if value_type == "boolean":
    return bool(value)

  elif value_type == "number":
    if isinstance(value, six.integer_types + (float,)):
      return value
    raise DataTableException("Wrong type %s when expected number" % t_value)

  elif value_type == "string":
    if isinstance(value, six.text_type):
      return value
    if isinstance(value, bytes):
      # Byte strings are assumed to hold UTF-8 encoded text.
      return six.text_type(value, encoding="utf-8")
    else:
      return six.text_type(value)

  elif value_type == "date":
    # Check datetime first: it is a subclass of date.
    if isinstance(value, datetime.datetime):
      return datetime.date(value.year, value.month, value.day)
    elif isinstance(value, datetime.date):
      return value
    else:
      raise DataTableException("Wrong type %s when expected date" % t_value)

  elif value_type == "timeofday":
    if isinstance(value, datetime.datetime):
      # A full datetime is narrowed down to its time component.
      return datetime.time(value.hour, value.minute, value.second)
    elif isinstance(value, datetime.time):
      return value
    else:
      raise DataTableException("Wrong type %s when expected time" % t_value)

  elif value_type == "datetime":
    if isinstance(value, datetime.datetime):
      return value
    else:
      raise DataTableException("Wrong type %s when expected datetime" %
                               t_value)
  # If we got here, it means the given value_type was not one of the
  # supported types.
  raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
  """Formats a single cell value for plain-text output.

  None renders as "(empty)"; dates/times use their default string form;
  booleans are lowercased to match JS; byte strings are decoded as
  UTF-8; anything else goes through the text constructor.
  """
  if value is None:
    return "(empty)"
  if isinstance(value, (datetime.datetime, datetime.date, datetime.time)):
    return str(value)
  if isinstance(value, six.text_type):
    return value
  if isinstance(value, bool):
    return str(value).lower()
  if isinstance(value, bytes):
    return six.text_type(value, encoding="utf-8")
  return six.text_type(value)
@staticmethod
def ColumnTypeParser(description):
  """Parses a single column description. Internal helper method.

  Args:
    description: a column description in the possible formats:
        'id'
        ('id',)
        ('id', 'type')
        ('id', 'type', 'label')
        ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})

  Returns:
    Dictionary with the keys id, label, type, and custom_properties,
    where:
      - If label not given, it equals the id.
      - If type not given, string is used by default.
      - If custom properties are not given, an empty dictionary is used
        by default.

  Raises:
    DataTableException: The column description did not match the
        expected form, or an unsupported type was passed.
  """
  if not description:
    raise DataTableException("Description error: empty description given")
  if not isinstance(description, (six.string_types, tuple)):
    raise DataTableException("Description error: expected either string or "
                             "tuple, got %s." % type(description))
  if isinstance(description, six.string_types):
    description = (description,)
  # The first three entries (id, type, label) must all be strings.
  for part in description[:3]:
    if not isinstance(part, six.string_types):
      raise DataTableException("Description error: expected tuple of "
                               "strings, current element of type %s." %
                               type(part))
  parsed = {"id": description[0],
            "label": description[0],
            "type": "string",
            "custom_properties": {}}
  if len(description) > 1:
    parsed["type"] = description[1].lower()
  if len(description) > 2:
    parsed["label"] = description[2]
  if len(description) > 3:
    if not isinstance(description[3], dict):
      raise DataTableException("Description error: expected custom "
                               "properties of type dict, current element "
                               "of type %s." % type(description[3]))
    parsed["custom_properties"] = description[3]
  if len(description) > 4:
    raise DataTableException("Description error: tuple of length > 4")
  if parsed["type"] not in ["string", "number", "boolean",
                            "date", "datetime", "timeofday"]:
    raise DataTableException(
        "Description error: unsupported type '%s'" % parsed["type"])
  return parsed
@staticmethod
def TableDescriptionParser(table_description, depth=0):
  """Parses the table_description object for internal use.

  Parses the user-submitted table description into an internal format used
  by the Python DataTable class. Returns the flat list of parsed columns.

  Args:
    table_description: A description of the table which should comply
        with one of the formats described below.
    depth: Optional. The depth of the first level in the current
        description. Used by recursive calls to this function.

  Returns:
    List of columns, where each column is represented by a dictionary with
    the keys: id, label, type, depth, container which means the following:
      - id: the id of the column
      - name: The name of the column
      - type: The datatype of the elements in this column. Allowed types
        are described in ColumnTypeParser().
      - depth: The depth of this column in the table description
      - container: 'dict', 'iter' or 'scalar' for parsing the format
        easily.
      - custom_properties: The custom properties for this column.
    The returned description is flattened regardless of how it was given.

  Raises:
    DataTableException: Error in a column description or in the
        description structure.

  Examples:
    A column description can be of the following forms:
      'id'
      ('id',)
      ('id', 'type')
      ('id', 'type', 'label')
      ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
    or as a dictionary:
      'id': 'type'
      'id': ('type',)
      'id': ('type', 'label')
      'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
    If the type is not specified, we treat it as string.
    If no specific label is given, the label is simply the id.
    If no custom properties are given, we use an empty dictionary.

    input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
    output: [{'id': 'a', 'label': 'a', 'type': 'date',
              'depth': 0, 'container': 'iter', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'timeofday',
              'depth': 0, 'container': 'iter',
              'custom_properties': {'foo': 'bar'}}]

    input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
    output: [{'id': 'a', 'label': 'a', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'iter', 'custom_properties': {}},
             {'id': 'c', 'label': 'column c', 'type': 'string',
              'depth': 1, 'container': 'iter', 'custom_properties': {}}]

    input: {('a', 'number', 'column a'): {'b': 'number', 'c': 'string'}}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'c', 'type': 'string',
              'depth': 1, 'container': 'dict', 'custom_properties': {}}]

    input: {('w', 'string', 'word'): ('c', 'number', 'count')}
    output: [{'id': 'w', 'label': 'word', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'count', 'type': 'number',
              'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

    input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
              'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
              'container': 'dict', 'custom_properties': {}}

  NOTE: there might be ambiguity in the case of a dictionary representation
  of a single column. For example, the following description can be parsed
  in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
  with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
  'a', and the other named 'b' of type 'c'. We choose the first option by
  default, and in case the second option is the right one, it is possible to
  make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
  into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
  -- second 'b' is the label, and {} is the custom properties field.
  """
  # For the recursion step, we check for a scalar object (string or tuple)
  if isinstance(table_description, (six.string_types, tuple)):
    parsed_col = DataTable.ColumnTypeParser(table_description)
    parsed_col["depth"] = depth
    parsed_col["container"] = "scalar"
    return [parsed_col]

  # Since it is not scalar, table_description must be iterable.
  if not hasattr(table_description, "__iter__"):
    raise DataTableException("Expected an iterable object, got %s" %
                             type(table_description))
  if not isinstance(table_description, dict):
    # We expect a non-dictionary iterable item: each element describes
    # one column at the current depth.
    columns = []
    for desc in table_description:
      parsed_col = DataTable.ColumnTypeParser(desc)
      parsed_col["depth"] = depth
      parsed_col["container"] = "iter"
      columns.append(parsed_col)
    if not columns:
      raise DataTableException("Description iterable objects should not"
                               " be empty.")
    return columns
  # The other case is a dictionary
  if not table_description:
    raise DataTableException("Empty dictionaries are not allowed inside"
                             " description")

  # To differentiate between the two cases of more levels below or this is
  # the most inner dictionary, we consider the number of keys (more then one
  # key is indication for most inner dictionary) and the type of the key and
  # value in case of only 1 key (if the type of key is string and the type of
  # the value is a tuple of 0-3 items, we assume this is the most inner
  # dictionary).
  # NOTE: this way of differentiating might create ambiguity. See docs.
  if (len(table_description) != 1 or
      (isinstance(next(six.iterkeys(table_description)), six.string_types) and
       isinstance(next(six.itervalues(table_description)), tuple) and
       len(next(six.itervalues(table_description))) < 4)):
    # This is the most inner dictionary. Parsing types.
    columns = []
    # We sort the items, equivalent to sort the keys since they are unique
    for key, value in sorted(table_description.items()):
      # We parse the column type as (key, type) or (key, type, label) using
      # ColumnTypeParser.
      if isinstance(value, tuple):
        parsed_col = DataTable.ColumnTypeParser((key,) + value)
      else:
        parsed_col = DataTable.ColumnTypeParser((key, value))
      parsed_col["depth"] = depth
      parsed_col["container"] = "dict"
      columns.append(parsed_col)
    return columns
  # This is an outer dictionary, must have at most one key. The single
  # key describes this level's column; the value is recursed into with
  # an incremented depth.
  parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
  parsed_col["depth"] = depth
  parsed_col["container"] = "dict"
  return ([parsed_col] + DataTable.TableDescriptionParser(
      sorted(table_description.values())[0], depth=depth + 1))
@property
def columns(self):
  """Returns the parsed table description.

  Each entry is a dict with keys id, label, type, depth, container and
  custom_properties, as produced by TableDescriptionParser().
  """
  return self.__columns
def NumberOfRows(self):
  """Returns the number of rows in the current data stored in the table."""
  # Each element of self.__data is one (row values, custom props) pair.
  return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
"""Sets the custom properties for given row(s).
Can accept a single row or an iterable of rows.
Sets the given custom properties for all specified rows.
Args:
rows: The row, or rows, to set the custom properties for.
custom_properties: A string to string dictionary of custom properties to
set for all rows.
"""
if not hasattr(rows, "__iter__"):
rows = [rows]
for row in rows:
self.__data[row] = (self.__data[row][0], custom_properties)
def LoadData(self, data, custom_properties=None):
  """Loads new rows to the data table, clearing existing rows.

  May also set the custom_properties for the added rows. The given custom
  properties dictionary specifies the dictionary that will be used for
  *all* given rows.

  Args:
    data: The rows that the table will contain.
    custom_properties: A dictionary of string to string to set as the
        custom properties for all rows.
  """
  # Reset first so the table holds exactly the given rows afterwards.
  self.__data = []
  self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
"""Appends new data to the table.
Data is appended in rows. Data must comply with
the table schema passed in to __init__(). See CoerceValue() for a list
of acceptable data types. See the class documentation for more information
and examples of schema and data values.
Args:
data: The row to add to the table. The data must conform to the table
description format.
custom_properties: A dictionary of string to string, representing the
custom properties to add to all the rows.
Raises:
DataTableException: The data structure does not match the description.
"""
# If the maximal depth is 0, we simply iterate over the data table
# lines and insert them using _InnerAppendData. Otherwise, we simply
# let the _InnerAppendData handle all the levels.
if not self.__columns[-1]["depth"]:
for row in data:
self._InnerAppendData(({}, custom_properties), row, 0)
else:
self._InnerAppendData(({}, custom_properties), data, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
  """Inner function to assist LoadData.

  Recursively walks the (possibly nested) data structure, accumulating
  the values of the outer columns in prev_col_values and appending one
  finished (values, custom_properties) pair per leaf row to the table.

  Args:
    prev_col_values: A ({column id: value}, custom_properties) pair
        holding what has been collected so far for the current row(s).
    data: The remaining (sub-)structure to consume from col_index on.
    col_index: Index into self.__columns of the next column to fill.

  Raises:
    DataTableException: The data structure does not match the
        description (wrong nesting depth or container type).
  """
  # We first check that col_index has not exceeded the columns size
  if col_index >= len(self.__columns):
    raise DataTableException("The data does not match description, too deep")

  # Dealing with the scalar case, the data is the last value.
  if self.__columns[col_index]["container"] == "scalar":
    prev_col_values[0][self.__columns[col_index]["id"]] = data
    self.__data.append(prev_col_values)
    return

  if self.__columns[col_index]["container"] == "iter":
    # Dicts are iterable too, but an "iter" container requires a
    # positional (non-mapping) sequence.
    if not hasattr(data, "__iter__") or isinstance(data, dict):
      raise DataTableException("Expected iterable object, got %s" %
                               type(data))
    # We only need to insert the rest of the columns
    # If there are less items than expected, we only add what there is.
    for value in data:
      if col_index >= len(self.__columns):
        raise DataTableException("Too many elements given in data")
      prev_col_values[0][self.__columns[col_index]["id"]] = value
      col_index += 1
    self.__data.append(prev_col_values)
    return

  # We know the current level is a dictionary, we verify the type.
  if not isinstance(data, dict):
    raise DataTableException("Expected dictionary at current level, got %s" %
                             type(data))
  # We check if this is the last level
  if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
    # We need to add the keys in the dictionary as they are
    for col in self.__columns[col_index:]:
      if col["id"] in data:
        prev_col_values[0][col["id"]] = data[col["id"]]
    self.__data.append(prev_col_values)
    return

  # We have a dictionary in an inner depth level.
  if not data.keys():
    # In case this is an empty dictionary, we add a record with the columns
    # filled only until this point.
    self.__data.append(prev_col_values)
  else:
    # One recursion per key: each key becomes this column's value and the
    # accumulated values are copied so siblings don't share state.
    for key in sorted(data):
      col_values = dict(prev_col_values[0])
      col_values[self.__columns[col_index]["id"]] = key
      self._InnerAppendData((col_values, prev_col_values[1]),
                            data[key], col_index + 1)
def _PreparedData(self, order_by=()):
  """Prepares the data for enumeration - sorting it by order_by.

  Args:
    order_by: Optional. Specifies the name of the column(s) to sort by,
        and (optionally) which direction to sort in. Default sort
        direction is asc. Following formats are accepted:
          "string_col_name" -- For a single key in default (asc) order.
          ("string_col_name", "asc|desc") -- For a single key.
          [("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
          one column, an array of tuples of (col_name, "asc|desc").

  Returns:
    The data sorted by the keys given.

  Raises:
    DataTableException: Sort direction not in 'asc' or 'desc'
  """
  if not order_by:
    return self.__data

  sorted_data = self.__data[:]
  # Normalize a single key -- either a bare column name or one
  # (name, "asc"/"desc") pair -- into a one-element sequence of keys.
  if isinstance(order_by, six.string_types) or (
      isinstance(order_by, tuple) and len(order_by) == 2 and
      order_by[1].lower() in ["asc", "desc"]):
    order_by = (order_by,)
  # Sorting by the keys in reverse order relies on Python's stable sort:
  # the final pass (the first key) dominates, with earlier passes acting
  # as tie-breakers.
  for key in reversed(order_by):
    if isinstance(key, six.string_types):
      sorted_data.sort(key=lambda x: x[0].get(key))
    elif (isinstance(key, (list, tuple)) and len(key) == 2 and
          key[1].lower() in ("asc", "desc")):
      key_func = lambda x: x[0].get(key[0])
      sorted_data.sort(key=key_func, reverse=key[1].lower() != "asc")
    else:
      raise DataTableException("Expected tuple with second value: "
                               "'asc' or 'desc'")

  return sorted_data
def ToJSCode(self, name, columns_order=None, order_by=()):
  """Writes the data table as a JS code string.

  This method writes a string of JS code that can be run to
  generate a DataTable with the specified data. Typically used for debugging
  only.

  Args:
    name: The name of the table. The name would be used as the DataTable's
          variable name in the created JS code.
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the order
                   in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.

  Returns:
    A string of JS code that, when run, generates a DataTable with the given
    name and the data stored in the DataTable object.
    Example result:
      "var tab1 = new google.visualization.DataTable();
       tab1.addColumn("string", "a", "a");
       tab1.addColumn("number", "b", "b");
       tab1.addColumn("boolean", "c", "c");
       tab1.addRows(10);
       tab1.setCell(0, 0, "a");
       tab1.setCell(0, 1, 1, null, {"foo": "bar"});
       tab1.setCell(0, 2, true);
       ...
       tab1.setCell(9, 0, "c");
       tab1.setCell(9, 1, 3, "3$");
       tab1.setCell(9, 2, false);"

  Raises:
    DataTableException: The data does not match the type.
  """
  encoder = DataTableJSONEncoder()

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  # Index the parsed column descriptions by column id for quick lookup.
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # We first create the table with the given name
  jscode = "var %s = new google.visualization.DataTable();\n" % name
  if self.custom_properties:
    jscode += "%s.setTableProperties(%s);\n" % (
        name, encoder.encode(self.custom_properties))

  # We add the columns to the table
  for i, col in enumerate(columns_order):
    jscode += "%s.addColumn(%s, %s, %s);\n" % (
        name,
        encoder.encode(col_dict[col]["type"]),
        encoder.encode(col_dict[col]["label"]),
        encoder.encode(col_dict[col]["id"]))
    if col_dict[col]["custom_properties"]:
      jscode += "%s.setColumnProperties(%d, %s);\n" % (
          name, i, encoder.encode(col_dict[col]["custom_properties"]))
  jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

  # We now go over the data and add each row
  for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
    # We add all the elements of this row by their order
    for (j, col) in enumerate(columns_order):
      # Missing or None cells get no setCell() call at all; the JS table
      # leaves them null after addRows().
      if col not in row or row[col] is None:
        continue
      value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        cell_cp = ""
        if len(value) == 3:
          cell_cp = ", %s" % encoder.encode(row[col][2])
        # We have a formatted value or custom property as well
        jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                   (name, i, j,
                    self.EscapeForJSCode(encoder, value[0]),
                    self.EscapeForJSCode(encoder, value[1]), cell_cp))
      else:
        jscode += "%s.setCell(%d, %d, %s);\n" % (
            name, i, j, self.EscapeForJSCode(encoder, value))
    # Row-level custom properties (second element of the row tuple).
    if cp:
      jscode += "%s.setRowProperties(%d, %s);\n" % (
          name, i, encoder.encode(cp))
  return jscode
def ToHtml(self, columns_order=None, order_by=()):
  """Writes the data table as an HTML table code string.

  Args:
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the order
                   in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.

  Returns:
    An HTML table code string.
    Example result (the result is without the newlines):
     <html><body><table border="1">
      <thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
      <tbody>
       <tr><td>1</td><td>"z"</td><td>2</td></tr>
       <tr><td>"3$"</td><td>"w"</td><td></td></tr>
      </tbody>
     </table></body></html>

  Raises:
    DataTableException: The data does not match the type.
  """
  # %-templates for each structural layer of the generated markup.
  table_template = "<html><body><table border=\"1\">%s</table></body></html>"
  columns_template = "<thead><tr>%s</tr></thead>"
  rows_template = "<tbody>%s</tbody>"
  row_template = "<tr>%s</tr>"
  header_cell_template = "<th>%s</th>"
  cell_template = "<td>%s</td>"

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Header row: column labels, HTML-escaped.
  columns_list = []
  for col in columns_order:
    columns_list.append(header_cell_template %
                        html.escape(col_dict[col]["label"]))
  columns_html = columns_template % "".join(columns_list)

  rows_list = []
  # We now go over the data and add each row
  for row, unused_cp in self._PreparedData(order_by):
    cells_list = []
    # We add all the elements of this row by their order
    for col in columns_order:
      # For empty string we want empty quotes ("").
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # We have a formatted value and we're going to use it
        cells_list.append(cell_template % html.escape(self.ToString(value[1])))
      else:
        cells_list.append(cell_template % html.escape(self.ToString(value)))
    rows_list.append(row_template % "".join(cells_list))
  rows_html = rows_template % "".join(rows_list)

  return table_template % (columns_html + rows_html)
def ToTsvExcel(self, columns_order=None, order_by=()):
  """Returns a file in tab-separated-format readable by MS Excel.

  The table is rendered via ToCsv() with a tab separator and then encoded
  as little-endian UTF-16, which is the encoding Excel expects for TSV.

  Args:
    columns_order: Delegated to ToCsv.
    order_by: Delegated to ToCsv.

  Returns:
    A tab-separated little endian UTF16 file representing the table.
  """
  result = self.ToCsv(columns_order, order_by, separator="\t")
  # Under Python 2, ToCsv may hand back UTF-8 bytes; promote to text
  # before re-encoding as UTF-16LE.
  if isinstance(result, six.text_type):
    text = result
  else:
    text = result.decode("utf-8")
  return text.encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
  """Returns an object suitable to be converted to JSON.

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
                   you want them created in the output table. If specified,
                   all column IDs must be present.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData().

  Returns:
    A dictionary object for use by ToJSon or ToJSonResponse.
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Creating the column JSON objects
  col_objs = []
  for col_id in columns_order:
    col_obj = {"id": col_dict[col_id]["id"],
               "label": col_dict[col_id]["label"],
               "type": col_dict[col_id]["type"]}
    # "p" carries per-column custom properties, only when non-empty.
    if col_dict[col_id]["custom_properties"]:
      col_obj["p"] = col_dict[col_id]["custom_properties"]
    col_objs.append(col_obj)

  # Creating the rows jsons
  row_objs = []
  for row, cp in self._PreparedData(order_by):
    cell_objs = []
    for col in columns_order:
      value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
      if value is None:
        # Null cell, serialized as JSON null.
        cell_obj = None
      elif isinstance(value, tuple):
        # Cell keys: "v" = raw value, "f" = formatted value,
        # "p" = cell custom properties (tuple of length 3).
        cell_obj = {"v": value[0]}
        if len(value) > 1 and value[1] is not None:
          cell_obj["f"] = value[1]
        if len(value) == 3:
          cell_obj["p"] = value[2]
      else:
        cell_obj = {"v": value}
      cell_objs.append(cell_obj)
    row_obj = {"c": cell_objs}
    # Row-level custom properties, only when non-empty.
    if cp:
      row_obj["p"] = cp
    row_objs.append(row_obj)

  json_obj = {"cols": col_objs, "rows": row_objs}
  if self.custom_properties:
    json_obj["p"] = self.custom_properties

  return json_obj
def ToJSon(self, columns_order=None, order_by=()):
  """Returns a string usable in a JS google.visualization.DataTable constructor.

  Use this output when you host the visualization HTML yourself and want to
  build the data table in Python; pass the string straight into the JS
  constructor:

    google.setOnLoadCallback(drawTable);
    function drawTable() {
      var data = new google.visualization.DataTable(_my_JSon_string, 0.6);
      myTable.draw(data);
    }

  Args:
    columns_order: Optional. List of all column IDs in the desired order of
                   the output table; if given it must mention every column.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData().

  Returns:
    A JSon constructor string for a JS DataTable holding this table's data,
    e.g. (without the newlines):
     {cols: [{id:"a",label:"a",type:"number"}, ...],
      rows: [{c:[{v:1},{v:"z"},{v:2}]}, ...],
      p:    {'foo': 'bar'}}

  Raises:
    DataTableException: The data does not match the type.
  """
  json_obj = self._ToJSonObj(columns_order, order_by)
  encoded = DataTableJSONEncoder().encode(json_obj)
  if isinstance(encoded, str):
    return encoded
  # Python 2 compatibility: a `unicode` result is returned as UTF-8 bytes.
  return encoded.encode("utf-8")
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
  """Writes a table as a JSON response that can be returned as-is to a client.

  Produces the reply to a Google Visualization API query: a call to the
  client-side response handler carrying the table JSON, so a visualization
  hosted on another page can receive the data.

  Args:
    columns_order: Optional. Passed straight to self._ToJSonObj().
    order_by: Optional. Passed straight to self._ToJSonObj().
    req_id: Optional. The response id, as retrieved by the request.
    response_handler: Optional. The response handler, as retrieved by the
        request.

  Returns:
    A JSON response string to be received by the JS visualization Query
    object, e.g. (newlines added for readability):

      google.visualization.Query.setResponse({
          'version':'0.6', 'reqId':'0', 'status':'ok',
          'table': {cols: [...], rows: [...]}});

  Note: The URL returning this string can be used as a data source by Google
        Visualization Gadgets or from JS code.
  """
  response_obj = dict(version="0.6",
                      reqId=str(req_id),
                      status="ok",
                      table=self._ToJSonObj(columns_order, order_by))
  payload = DataTableJSONEncoder().encode(response_obj)
  # Python 2 compatibility: encode a `unicode` payload to UTF-8 bytes.
  if not isinstance(payload, str):
    payload = payload.encode("utf-8")
  return "%s(%s);" % (response_handler, payload)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
  """Writes the right response according to the request string passed in tqx.

  Parses the tqx request string (format defined in the Google Visualization
  data-source documentation) and dispatches on its "out" parameter:
  ToJSonResponse() for "json" (the default), ToHtml() for "html", ToCsv()
  for "csv", and ToTsvExcel() for "tsv-excel", forwarding the remaining
  relevant request keys.

  Args:
    columns_order: Optional. Passed as is to the relevant response function.
    order_by: Optional. Passed as is to the relevant response function.
    tqx: Optional. The request string as received by HTTP GET, in the format
         "key1:value1;key2:value2...". Every key has a default, so an empty
         string yields ToJSonResponse() with no extra parameters.

  Returns:
    A response string, as returned by the relevant response function.

  Raises:
    DataTableException: One of the parameters passed in tqx is not supported.
  """
  tqx_dict = {}
  if tqx:
    tqx_dict = dict(opt.split(":") for opt in tqx.split(";"))
  if tqx_dict.get("version", "0.6") != "0.6":
    raise DataTableException(
        "Version (%s) passed by request is not supported."
        % tqx_dict["version"])

  out_format = tqx_dict.get("out", "json")
  if out_format == "json":
    handler = tqx_dict.get("responseHandler",
                           "google.visualization.Query.setResponse")
    return self.ToJSonResponse(columns_order, order_by,
                               req_id=tqx_dict.get("reqId", 0),
                               response_handler=handler)
  if out_format == "html":
    return self.ToHtml(columns_order, order_by)
  if out_format == "csv":
    return self.ToCsv(columns_order, order_by)
  if out_format == "tsv-excel":
    return self.ToTsvExcel(columns_order, order_by)
  raise DataTableException(
      "'out' parameter: '%s' is not supported" % out_format)
|
google/google-visualization-python
|
gviz_api.py
|
DataTable.ToTsvExcel
|
python
|
def ToTsvExcel(self, columns_order=None, order_by=()):
csv_result = self.ToCsv(columns_order, order_by, separator="\t")
if not isinstance(csv_result, six.text_type):
csv_result = csv_result.decode("utf-8")
return csv_result.encode("UTF-16LE")
|
Returns a file in tab-separated-format readable by MS Excel.
Returns a file in UTF-16 little endian encoding, with tabs separating the
values.
Args:
columns_order: Delegated to ToCsv.
order_by: Delegated to ToCsv.
Returns:
A tab-separated little endian UTF16 file representing the table.
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L895-L911
|
[
"def ToCsv(self, columns_order=None, order_by=(), separator=\",\"):\n \"\"\"Writes the data table as a CSV string.\n\n Output is encoded in UTF-8 because the Python \"csv\" module can't handle\n Unicode properly according to its documentation.\n\n Args:\n columns_order: Optional. Specifies the order of columns in the\n output table. Specify a list of all column IDs in the order\n in which you want the table created.\n Note that you must list all column IDs in this parameter,\n if you use it.\n order_by: Optional. Specifies the name of the column(s) to sort by.\n Passed as is to _PreparedData.\n separator: Optional. The separator to use between the values.\n\n Returns:\n A CSV string representing the table.\n Example result:\n 'a','b','c'\n 1,'z',2\n 3,'w',''\n\n Raises:\n DataTableException: The data does not match the type.\n \"\"\"\n\n csv_buffer = six.StringIO()\n writer = csv.writer(csv_buffer, delimiter=separator)\n\n if columns_order is None:\n columns_order = [col[\"id\"] for col in self.__columns]\n col_dict = dict([(col[\"id\"], col) for col in self.__columns])\n\n def ensure_str(s):\n \"Compatibility function. Ensures using of str rather than unicode.\"\n if isinstance(s, str):\n return s\n return s.encode(\"utf-8\")\n\n writer.writerow([ensure_str(col_dict[col][\"label\"])\n for col in columns_order])\n\n # We now go over the data and add each row\n for row, unused_cp in self._PreparedData(order_by):\n cells_list = []\n # We add all the elements of this row by their order\n for col in columns_order:\n value = \"\"\n if col in row and row[col] is not None:\n value = self.CoerceValue(row[col], col_dict[col][\"type\"])\n if isinstance(value, tuple):\n # We have a formatted value. 
Using it only for date/time types.\n if col_dict[col][\"type\"] in [\"date\", \"datetime\", \"timeofday\"]:\n cells_list.append(ensure_str(self.ToString(value[1])))\n else:\n cells_list.append(ensure_str(self.ToString(value[0])))\n else:\n cells_list.append(ensure_str(self.ToString(value)))\n writer.writerow(cells_list)\n return csv_buffer.getvalue()\n"
] |
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
  """Initialize the data table from a table schema and (optionally) data.

  See the class documentation for more information on table schema and data
  values.

  Args:
    table_description: A table schema, following one of the formats described
                       in TableDescriptionParser(). Schemas describe the
                       column names, data types, and labels.
    data: Optional. If given, fills the table with the given data; the data
          structure must be consistent with the schema. Data can also be
          added later via AppendData().
    custom_properties: Optional. A string-to-string dictionary stored as the
                       table's custom properties; may be changed later
                       through self.custom_properties.

  Raises:
    DataTableException: Raised if the data and the description did not match,
                        or did not use the supported formats.
  """
  self.__columns = self.TableDescriptionParser(table_description)
  self.__data = []
  if custom_properties is None:
    self.custom_properties = {}
  else:
    self.custom_properties = custom_properties
  if data:
    self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
  """Coerces a single value into the type expected for its column.

  Internal helper method.

  Args:
    value: The value which should be converted
    value_type: One of "string", "number", "boolean", "date", "datetime" or
                "timeofday".

  Returns:
    An item of the Python type appropriate to the given value_type. Strings
    are also converted to Unicode using UTF-8 encoding if necessary.

    If a tuple is given, it should be in one of the following forms:
      - (value, formatted value)
      - (value, formatted value, custom properties)
    where the formatted value is a string, and custom properties is a
    dictionary of the custom properties for this cell.
    To specify custom properties without specifying formatted value, one can
    pass None as the formatted value.
    One can also have a null-valued cell with formatted value and/or custom
    properties by specifying None for the value.
    This method ignores the custom properties except for checking that it is a
    dictionary. The custom properties are handled in the ToJSon and ToJSCode
    methods.

    The real type of the given value is not strictly checked. For example,
    any type can be used for string - as we simply take its str( ) and for
    boolean value we just check "if value".

  Examples:
    CoerceValue(None, "string") returns None
    CoerceValue((5, "5$"), "number") returns (5, "5$")
    CoerceValue(100, "string") returns "100"
    CoerceValue(0, "boolean") returns False

  Raises:
    DataTableException: The value and type did not match in a not-recoverable
                        way, for example given value 'abc' for type 'number'.
  """
  if isinstance(value, tuple):
    # In case of a tuple, we run the same function on the value itself and
    # add the formatted value.
    if (len(value) not in [2, 3] or
        (len(value) == 3 and not isinstance(value[2], dict))):
      raise DataTableException("Wrong format for value and formatting - %s." %
                               str(value))
    if not isinstance(value[1], six.string_types + (type(None),)):
      raise DataTableException("Formatted value is not string, given %s." %
                               type(value[1]))
    js_value = DataTable.CoerceValue(value[0], value_type)
    # Re-attach formatted value (and custom properties, if any) untouched.
    return (js_value,) + value[1:]

  # Remember the original type for error messages before any conversion.
  t_value = type(value)
  if value is None:
    return value
  if value_type == "boolean":
    return bool(value)

  elif value_type == "number":
    if isinstance(value, six.integer_types + (float,)):
      return value
    raise DataTableException("Wrong type %s when expected number" % t_value)

  elif value_type == "string":
    if isinstance(value, six.text_type):
      return value
    if isinstance(value, bytes):
      # Byte strings are assumed to be UTF-8 encoded text.
      return six.text_type(value, encoding="utf-8")
    else:
      return six.text_type(value)

  elif value_type == "date":
    if isinstance(value, datetime.datetime):
      # A datetime is narrowed down to its date part.
      return datetime.date(value.year, value.month, value.day)
    elif isinstance(value, datetime.date):
      return value
    else:
      raise DataTableException("Wrong type %s when expected date" % t_value)

  elif value_type == "timeofday":
    if isinstance(value, datetime.datetime):
      # A datetime is narrowed down to its time part.
      return datetime.time(value.hour, value.minute, value.second)
    elif isinstance(value, datetime.time):
      return value
    else:
      raise DataTableException("Wrong type %s when expected time" % t_value)

  elif value_type == "datetime":
    if isinstance(value, datetime.datetime):
      return value
    else:
      raise DataTableException("Wrong type %s when expected datetime" %
                               t_value)
  # If we got here, it means the given value_type was not one of the
  # supported types.
  raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
  """Renders a single cell value as display text.

  None becomes "(empty)", date/time values use their str() form, booleans
  are lowercased ("true"/"false"), byte strings are decoded as UTF-8, and
  everything else is converted to text.
  """
  if value is None:
    return "(empty)"
  if isinstance(value, (datetime.datetime,
                        datetime.date,
                        datetime.time)):
    return str(value)
  if isinstance(value, six.text_type):
    return value
  if isinstance(value, bool):
    return str(value).lower()
  if isinstance(value, bytes):
    return six.text_type(value, encoding="utf-8")
  return six.text_type(value)
@staticmethod
def ColumnTypeParser(description):
  """Parses a single column description. Internal helper method.

  Args:
    description: a column description in the possible formats:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})

  Returns:
    Dictionary with the following keys: id, label, type, and
    custom_properties where:
      - If label not given, it equals the id.
      - If type not given, string is used by default.
      - If custom properties are not given, an empty dictionary is used by
        default.

  Raises:
    DataTableException: The column description did not match the RE, or
        unsupported type was passed.
  """
  if not description:
    raise DataTableException("Description error: empty description given")

  if not isinstance(description, (six.string_types, tuple)):
    raise DataTableException("Description error: expected either string or "
                             "tuple, got %s." % type(description))

  # A bare string is shorthand for a one-element tuple: just the id.
  if isinstance(description, six.string_types):
    description = (description,)

  # According to the tuple's length, we fill the keys
  # We verify everything is of type string
  for elem in description[:3]:
    if not isinstance(elem, six.string_types):
      raise DataTableException("Description error: expected tuple of "
                               "strings, current element of type %s." %
                               type(elem))
  # Defaults: label mirrors the id, type is "string", no custom properties.
  desc_dict = {"id": description[0],
               "label": description[0],
               "type": "string",
               "custom_properties": {}}
  if len(description) > 1:
    desc_dict["type"] = description[1].lower()
    if len(description) > 2:
      desc_dict["label"] = description[2]
      if len(description) > 3:
        if not isinstance(description[3], dict):
          raise DataTableException("Description error: expected custom "
                                   "properties of type dict, current element "
                                   "of type %s." % type(description[3]))
        desc_dict["custom_properties"] = description[3]
        if len(description) > 4:
          raise DataTableException("Description error: tuple of length > 4")
  if desc_dict["type"] not in ["string", "number", "boolean",
                               "date", "datetime", "timeofday"]:
    raise DataTableException(
        "Description error: unsupported type '%s'" % desc_dict["type"])
  return desc_dict
@staticmethod
def TableDescriptionParser(table_description, depth=0):
  """Parses the table_description object for internal use.

  Parses the user-submitted table description into an internal format used
  by the Python DataTable class. Returns the flat list of parsed columns.

  Args:
    table_description: A description of the table which should comply
                       with one of the formats described below.
    depth: Optional. The depth of the first level in the current description.
           Used by recursive calls to this function.

  Returns:
    List of columns, where each column represented by a dictionary with the
    keys: id, label, type, depth, container which means the following:
    - id: the id of the column
    - name: The name of the column
    - type: The datatype of the elements in this column. Allowed types are
            described in ColumnTypeParser().
    - depth: The depth of this column in the table description
    - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
    - custom_properties: The custom properties for this column.
    The returned description is flattened regardless of how it was given.

  Raises:
    DataTableException: Error in a column description or in the description
                        structure.

  Examples:
    A column description can be of the following forms:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
     or as a dictionary:
     'id': 'type'
     'id': ('type',)
     'id': ('type', 'label')
     'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
    If the type is not specified, we treat it as string.
    If no specific label is given, the label is simply the id.
    If no custom properties are given, we use an empty dictionary.

    input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
    output: [{'id': 'a', 'label': 'a', 'type': 'date',
              'depth': 0, 'container': 'iter', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'timeofday',
              'depth': 0, 'container': 'iter',
              'custom_properties': {'foo': 'bar'}}]

    input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
    output: [{'id': 'a', 'label': 'a', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'iter', 'custom_properties': {}},
             {'id': 'c', 'label': 'column c', 'type': 'string',
              'depth': 1, 'container': 'iter', 'custom_properties': {}}]

    input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'c', 'type': 'string',
              'depth': 1, 'container': 'dict', 'custom_properties': {}}]

    input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
    output: [{'id': 'w', 'label': 'word', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'count', 'type': 'number',
              'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

    input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
             'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
             'container': 'dict', 'custom_properties': {}}

    NOTE: there might be ambiguity in the case of a dictionary representation
    of a single column. For example, the following description can be parsed
    in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
    with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
    'a', and the other named 'b' of type 'c'. We choose the first option by
    default, and in case the second option is the right one, it is possible to
    make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
    into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
    -- second 'b' is the label, and {} is the custom properties field.
  """
  # For the recursion step, we check for a scalar object (string or tuple)
  if isinstance(table_description, (six.string_types, tuple)):
    parsed_col = DataTable.ColumnTypeParser(table_description)
    parsed_col["depth"] = depth
    parsed_col["container"] = "scalar"
    return [parsed_col]

  # Since it is not scalar, table_description must be iterable.
  if not hasattr(table_description, "__iter__"):
    raise DataTableException("Expected an iterable object, got %s" %
                             type(table_description))
  if not isinstance(table_description, dict):
    # We expects a non-dictionary iterable item.
    columns = []
    for desc in table_description:
      parsed_col = DataTable.ColumnTypeParser(desc)
      parsed_col["depth"] = depth
      parsed_col["container"] = "iter"
      columns.append(parsed_col)
    if not columns:
      raise DataTableException("Description iterable objects should not"
                               " be empty.")
    return columns
  # The other case is a dictionary
  if not table_description:
    raise DataTableException("Empty dictionaries are not allowed inside"
                             " description")

  # To differentiate between the two cases of more levels below or this is
  # the most inner dictionary, we consider the number of keys (more then one
  # key is indication for most inner dictionary) and the type of the key and
  # value in case of only 1 key (if the type of key is string and the type of
  # the value is a tuple of 0-3 items, we assume this is the most inner
  # dictionary).
  # NOTE: this way of differentiating might create ambiguity. See docs.
  if (len(table_description) != 1 or
      (isinstance(next(six.iterkeys(table_description)), six.string_types) and
       isinstance(next(six.itervalues(table_description)), tuple) and
       len(next(six.itervalues(table_description))) < 4)):
    # This is the most inner dictionary. Parsing types.
    columns = []
    # We sort the items, equivalent to sort the keys since they are unique
    for key, value in sorted(table_description.items()):
      # We parse the column type as (key, type) or (key, type, label) using
      # ColumnTypeParser.
      if isinstance(value, tuple):
        parsed_col = DataTable.ColumnTypeParser((key,) + value)
      else:
        parsed_col = DataTable.ColumnTypeParser((key, value))
      parsed_col["depth"] = depth
      parsed_col["container"] = "dict"
      columns.append(parsed_col)
    return columns
  # This is an outer dictionary, must have at most one key.
  # Recurse into its single value with an incremented depth.
  parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
  parsed_col["depth"] = depth
  parsed_col["container"] = "dict"
  return ([parsed_col] + DataTable.TableDescriptionParser(
      sorted(table_description.values())[0], depth=depth + 1))
@property
def columns(self):
    """Returns the parsed table description.

    This is the list of column-descriptor dictionaries built by
    TableDescriptionParser() in __init__(); each entry carries at least the
    keys "id", "label", "type", "custom_properties", "depth" and
    "container".  The list is returned as-is (not a copy), so callers must
    treat it as read-only.
    """
    return self.__columns
def NumberOfRows(self):
    """Returns the number of rows in the current data stored in the table."""
    # self.__data holds one (row_values_dict, custom_properties) pair per row.
    return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
"""Sets the custom properties for given row(s).
Can accept a single row or an iterable of rows.
Sets the given custom properties for all specified rows.
Args:
rows: The row, or rows, to set the custom properties for.
custom_properties: A string to string dictionary of custom properties to
set for all rows.
"""
if not hasattr(rows, "__iter__"):
rows = [rows]
for row in rows:
self.__data[row] = (self.__data[row][0], custom_properties)
def LoadData(self, data, custom_properties=None):
"""Loads new rows to the data table, clearing existing rows.
May also set the custom_properties for the added rows. The given custom
properties dictionary specifies the dictionary that will be used for *all*
given rows.
Args:
data: The rows that the table will contain.
custom_properties: A dictionary of string to string to set as the custom
properties for all rows.
"""
self.__data = []
self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
"""Appends new data to the table.
Data is appended in rows. Data must comply with
the table schema passed in to __init__(). See CoerceValue() for a list
of acceptable data types. See the class documentation for more information
and examples of schema and data values.
Args:
data: The row to add to the table. The data must conform to the table
description format.
custom_properties: A dictionary of string to string, representing the
custom properties to add to all the rows.
Raises:
DataTableException: The data structure does not match the description.
"""
# If the maximal depth is 0, we simply iterate over the data table
# lines and insert them using _InnerAppendData. Otherwise, we simply
# let the _InnerAppendData handle all the levels.
if not self.__columns[-1]["depth"]:
for row in data:
self._InnerAppendData(({}, custom_properties), row, 0)
else:
self._InnerAppendData(({}, custom_properties), data, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
    """Inner function to assist LoadData.

    Recursively consumes one level of the (possibly nested) `data`
    structure, accumulating cell values into `prev_col_values` and
    appending completed rows to self.__data.

    Args:
      prev_col_values: A (values_dict, custom_properties) pair; the dict
          maps column ids to the values collected at the outer levels so
          far.  It is mutated in place and appended as a row when a leaf
          level is reached.
      data: The data remaining at this level: a scalar, an iterable, or a
          dictionary, depending on the column's "container" kind.
      col_index: Index into self.__columns of the column this level fills.

    Raises:
      DataTableException: The data does not match the table description.
    """
    # We first check that col_index has not exceeded the columns size
    if col_index >= len(self.__columns):
        raise DataTableException("The data does not match description, too deep")

    # Dealing with the scalar case, the data is the last value.
    if self.__columns[col_index]["container"] == "scalar":
        prev_col_values[0][self.__columns[col_index]["id"]] = data
        self.__data.append(prev_col_values)
        return

    if self.__columns[col_index]["container"] == "iter":
        # Dictionaries are iterable too, but belong to the "dict" container
        # kind, so they are explicitly rejected here.
        if not hasattr(data, "__iter__") or isinstance(data, dict):
            raise DataTableException("Expected iterable object, got %s" %
                                     type(data))
        # We only need to insert the rest of the columns
        # If there are less items than expected, we only add what there is.
        for value in data:
            if col_index >= len(self.__columns):
                raise DataTableException("Too many elements given in data")
            prev_col_values[0][self.__columns[col_index]["id"]] = value
            col_index += 1
        self.__data.append(prev_col_values)
        return

    # We know the current level is a dictionary, we verify the type.
    if not isinstance(data, dict):
        raise DataTableException("Expected dictionary at current level, got %s" %
                                 type(data))
    # We check if this is the last level
    if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
        # We need to add the keys in the dictionary as they are
        for col in self.__columns[col_index:]:
            if col["id"] in data:
                prev_col_values[0][col["id"]] = data[col["id"]]
        self.__data.append(prev_col_values)
        return
    # We have a dictionary in an inner depth level.
    if not data.keys():
        # In case this is an empty dictionary, we add a record with the columns
        # filled only until this point.
        self.__data.append(prev_col_values)
    else:
        # Each key becomes this level's cell value; recurse with a *copy* of
        # the accumulated values so sibling keys don't clobber each other.
        for key in sorted(data):
            col_values = dict(prev_col_values[0])
            col_values[self.__columns[col_index]["id"]] = key
            self._InnerAppendData((col_values, prev_col_values[1]),
                                  data[key], col_index + 1)
def _PreparedData(self, order_by=()):
"""Prepares the data for enumeration - sorting it by order_by.
Args:
order_by: Optional. Specifies the name of the column(s) to sort by, and
(optionally) which direction to sort in. Default sort direction
is asc. Following formats are accepted:
"string_col_name" -- For a single key in default (asc) order.
("string_col_name", "asc|desc") -- For a single key.
[("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
one column, an array of tuples of (col_name, "asc|desc").
Returns:
The data sorted by the keys given.
Raises:
DataTableException: Sort direction not in 'asc' or 'desc'
"""
if not order_by:
return self.__data
sorted_data = self.__data[:]
if isinstance(order_by, six.string_types) or (
isinstance(order_by, tuple) and len(order_by) == 2 and
order_by[1].lower() in ["asc", "desc"]):
order_by = (order_by,)
for key in reversed(order_by):
if isinstance(key, six.string_types):
sorted_data.sort(key=lambda x: x[0].get(key))
elif (isinstance(key, (list, tuple)) and len(key) == 2 and
key[1].lower() in ("asc", "desc")):
key_func = lambda x: x[0].get(key[0])
sorted_data.sort(key=key_func, reverse=key[1].lower() != "asc")
else:
raise DataTableException("Expected tuple with second value: "
"'asc' or 'desc'")
return sorted_data
def ToJSCode(self, name, columns_order=None, order_by=()):
    """Writes the data table as a JS code string.

    This method writes a string of JS code that can be run to
    generate a DataTable with the specified data. Typically used for
    debugging only.

    Args:
      name: The name of the table. The name would be used as the DataTable's
            variable name in the created JS code.
      columns_order: Optional. Specifies the order of columns in the
                     output table. Specify a list of all column IDs in the
                     order in which you want the table created.
                     Note that you must list all column IDs in this
                     parameter, if you use it.
      order_by: Optional. Specifies the name of the column(s) to sort by.
                Passed as is to _PreparedData.

    Returns:
      A string of JS code that, when run, generates a DataTable with the
      given name and the data stored in the DataTable object.

      Example result:
        "var tab1 = new google.visualization.DataTable();
         tab1.addColumn("string", "a", "a");
         tab1.addColumn("number", "b", "b");
         tab1.addRows(10);
         tab1.setCell(0, 0, "a");
         tab1.setCell(0, 1, 1, null, {"foo": "bar"});
         ...
         tab1.setCell(9, 1, 3, "3$");"

    Raises:
      DataTableException: The data does not match the type.
    """
    encoder = DataTableJSONEncoder()

    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    col_dict = dict([(col["id"], col) for col in self.__columns])

    # We first create the table with the given name
    jscode = "var %s = new google.visualization.DataTable();\n" % name
    if self.custom_properties:
        jscode += "%s.setTableProperties(%s);\n" % (
            name, encoder.encode(self.custom_properties))

    # We add the columns to the table
    for i, col in enumerate(columns_order):
        jscode += "%s.addColumn(%s, %s, %s);\n" % (
            name,
            encoder.encode(col_dict[col]["type"]),
            encoder.encode(col_dict[col]["label"]),
            encoder.encode(col_dict[col]["id"]))
        if col_dict[col]["custom_properties"]:
            jscode += "%s.setColumnProperties(%d, %s);\n" % (
                name, i, encoder.encode(col_dict[col]["custom_properties"]))
    jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

    # We now go over the data and add each row
    for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
        # We add all the elements of this row by their order
        for (j, col) in enumerate(columns_order):
            # Missing or null cells are simply left unset in the JS table.
            if col not in row or row[col] is None:
                continue
            value = self.CoerceValue(row[col], col_dict[col]["type"])
            if isinstance(value, tuple):
                cell_cp = ""
                if len(value) == 3:
                    cell_cp = ", %s" % encoder.encode(row[col][2])
                # We have a formatted value or custom property as well.
                # A None formatted value is emitted as JS null by
                # EscapeForJSCode.
                jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                           (name, i, j,
                            self.EscapeForJSCode(encoder, value[0]),
                            self.EscapeForJSCode(encoder, value[1]), cell_cp))
            else:
                jscode += "%s.setCell(%d, %d, %s);\n" % (
                    name, i, j, self.EscapeForJSCode(encoder, value))
        if cp:
            jscode += "%s.setRowProperties(%d, %s);\n" % (
                name, i, encoder.encode(cp))
    return jscode
def ToHtml(self, columns_order=None, order_by=()):
    """Writes the data table as an HTML table code string.

    Args:
      columns_order: Optional. Specifies the order of columns in the
                     output table. Specify a list of all column IDs in the
                     order in which you want the table created.
                     Note that you must list all column IDs in this
                     parameter, if you use it.
      order_by: Optional. Specifies the name of the column(s) to sort by.
                Passed as is to _PreparedData.

    Returns:
      An HTML table code string.

      Example result (the result is without the newlines):
       <html><body><table border="1">
        <thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
        <tbody>
         <tr><td>1</td><td>"z"</td><td>2</td></tr>
         <tr><td>"3$"</td><td>"w"</td><td></td></tr>
        </tbody>
       </table></body></html>

    Raises:
      DataTableException: The data does not match the type.
    """
    table_template = "<html><body><table border=\"1\">%s</table></body></html>"
    columns_template = "<thead><tr>%s</tr></thead>"
    rows_template = "<tbody>%s</tbody>"
    row_template = "<tr>%s</tr>"
    header_cell_template = "<th>%s</th>"
    cell_template = "<td>%s</td>"

    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    col_dict = dict([(col["id"], col) for col in self.__columns])

    columns_list = []
    for col in columns_order:
        columns_list.append(header_cell_template %
                            html.escape(col_dict[col]["label"]))
    columns_html = columns_template % "".join(columns_list)

    rows_list = []
    # We now go over the data and add each row
    for row, unused_cp in self._PreparedData(order_by):
        cells_list = []
        # We add all the elements of this row by their order
        for col in columns_order:
            # For empty string we want empty quotes ("").
            value = ""
            if col in row and row[col] is not None:
                value = self.CoerceValue(row[col], col_dict[col]["type"])
            if isinstance(value, tuple):
                # Prefer the formatted value, but fall back to the raw value
                # when the formatted entry is None: CoerceValue explicitly
                # allows (value, None, custom_properties) tuples, and the
                # previous code rendered those cells as "(empty)" instead of
                # their actual value.
                display = value[1] if value[1] is not None else value[0]
                cells_list.append(
                    cell_template % html.escape(self.ToString(display)))
            else:
                cells_list.append(
                    cell_template % html.escape(self.ToString(value)))
        rows_list.append(row_template % "".join(cells_list))
    rows_html = rows_template % "".join(rows_list)

    return table_template % (columns_html + rows_html)
def ToCsv(self, columns_order=None, order_by=(), separator=","):
    """Writes the data table as a CSV string.

    Output is encoded in UTF-8 because the Python "csv" module can't handle
    Unicode properly according to its documentation.

    Args:
      columns_order: Optional. Specifies the order of columns in the
                     output table. Specify a list of all column IDs in the
                     order in which you want the table created.
                     Note that you must list all column IDs in this
                     parameter, if you use it.
      order_by: Optional. Specifies the name of the column(s) to sort by.
                Passed as is to _PreparedData.
      separator: Optional. The separator to use between the values.

    Returns:
      A CSV string representing the table.
      Example result:
       'a','b','c'
       1,'z',2
       3,'w',''

    Raises:
      DataTableException: The data does not match the type.
    """
    csv_buffer = six.StringIO()
    writer = csv.writer(csv_buffer, delimiter=separator)

    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    col_dict = dict([(col["id"], col) for col in self.__columns])

    def ensure_str(s):
        "Compatibility function. Ensures using of str rather than unicode."
        if isinstance(s, str):
            return s
        return s.encode("utf-8")

    writer.writerow([ensure_str(col_dict[col]["label"])
                     for col in columns_order])

    # We now go over the data and add each row
    for row, unused_cp in self._PreparedData(order_by):
        cells_list = []
        # We add all the elements of this row by their order
        for col in columns_order:
            value = ""
            if col in row and row[col] is not None:
                value = self.CoerceValue(row[col], col_dict[col]["type"])
            if isinstance(value, tuple):
                # Use the formatted value only for date/time types, and only
                # when one was actually supplied: CoerceValue allows tuples
                # with a None formatted value (to attach custom properties
                # only), and the previous code rendered those date cells as
                # "(empty)" instead of the raw value.
                if (col_dict[col]["type"] in ["date", "datetime", "timeofday"]
                        and value[1] is not None):
                    cells_list.append(ensure_str(self.ToString(value[1])))
                else:
                    cells_list.append(ensure_str(self.ToString(value[0])))
            else:
                cells_list.append(ensure_str(self.ToString(value)))
        writer.writerow(cells_list)
    return csv_buffer.getvalue()
def _ToJSonObj(self, columns_order=None, order_by=()):
"""Returns an object suitable to be converted to JSON.
Args:
columns_order: Optional. A list of all column IDs in the order in which
you want them created in the output table. If specified,
all column IDs must be present.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData().
Returns:
A dictionary object for use by ToJSon or ToJSonResponse.
"""
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
# Creating the column JSON objects
col_objs = []
for col_id in columns_order:
col_obj = {"id": col_dict[col_id]["id"],
"label": col_dict[col_id]["label"],
"type": col_dict[col_id]["type"]}
if col_dict[col_id]["custom_properties"]:
col_obj["p"] = col_dict[col_id]["custom_properties"]
col_objs.append(col_obj)
# Creating the rows jsons
row_objs = []
for row, cp in self._PreparedData(order_by):
cell_objs = []
for col in columns_order:
value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
if value is None:
cell_obj = None
elif isinstance(value, tuple):
cell_obj = {"v": value[0]}
if len(value) > 1 and value[1] is not None:
cell_obj["f"] = value[1]
if len(value) == 3:
cell_obj["p"] = value[2]
else:
cell_obj = {"v": value}
cell_objs.append(cell_obj)
row_obj = {"c": cell_objs}
if cp:
row_obj["p"] = cp
row_objs.append(row_obj)
json_obj = {"cols": col_objs, "rows": row_objs}
if self.custom_properties:
json_obj["p"] = self.custom_properties
return json_obj
def ToJSon(self, columns_order=None, order_by=()):
    """Returns a JSON string usable by the JS DataTable constructor.

    The returned string can be passed directly to the
    google.visualization.DataTable constructor on the page hosting the
    visualization, e.g.:

      var data = new google.visualization.DataTable(<this string>, 0.6);

    Args:
      columns_order: Optional. A list of all column IDs in the order in
          which they should appear in the output table.  If specified, all
          column IDs must be present.
      order_by: Optional. Column name(s) to sort by; passed as is to
          _PreparedData().

    Returns:
      A JSON string describing the table, e.g. (without newlines):
        {cols: [{id:"a",label:"a",type:"number"},
                {id:"b",label:"b",type:"string"}],
         rows: [{c:[{v:1},{v:"z"}]}, {c:[{v:3,f:"3$"},null]}],
         p: {'foo': 'bar'}}

    Raises:
      DataTableException: The data does not match the type.
    """
    json_obj = self._ToJSonObj(columns_order, order_by)
    encoded = DataTableJSONEncoder().encode(json_obj)
    # On Python 2 the encoder yields unicode; the public contract there is
    # a UTF-8 byte string, so encode explicitly.
    if isinstance(encoded, str):
        return encoded
    return encoded.encode("utf-8")
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
    """Writes a table as a JSON response that can be returned as-is to a client.

    Produces the Google Visualization API query response: a call to the
    given response handler with the serialized table as its argument.  The
    client-side Query object turns this back into a DataTable.

    Args:
      columns_order: Optional. Passed straight to self._ToJSonObj().
      order_by: Optional. Passed straight to self._ToJSonObj().
      req_id: Optional. The response id, as retrieved by the request.
      response_handler: Optional. The response handler, as retrieved by the
          request.

    Returns:
      A JSON response string, e.g. (newlines added for readability):
        google.visualization.Query.setResponse({
            'version':'0.6', 'reqId':'0', 'status':'ok',
            'table': {cols: [...], rows: [...]}});
    """
    payload = {
        "version": "0.6",
        "reqId": str(req_id),
        "table": self._ToJSonObj(columns_order, order_by),
        "status": "ok",
    }
    encoded = DataTableJSONEncoder().encode(payload)
    # Match ToJSon(): return UTF-8 bytes on Python 2, native str on Python 3.
    if not isinstance(encoded, str):
        encoded = encoded.encode("utf-8")
    return "%s(%s);" % (response_handler, encoded)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
    """Dispatches to the right output format according to the tqx request string.

    Parses the tqx string (format defined by the Google Visualization data
    source protocol, "key1:value1;key2:value2...") and calls the matching
    serializer: ToJSonResponse() for "json" (the default), ToHtml() for
    "html", ToCsv() for "csv" and ToTsvExcel() for "tsv-excel", forwarding
    the relevant request keys.

    Args:
      columns_order: Optional. Passed as is to the chosen serializer.
      order_by: Optional. Passed as is to the chosen serializer.
      tqx: Optional. The request string as received by HTTP GET.  An empty
          string selects all defaults (a plain ToJSonResponse() call).

    Returns:
      A response string, as returned by the chosen serializer.

    Raises:
      DataTableException: One of the parameters passed in tqx is not
          supported.
    """
    options = {}
    if tqx:
        options = dict(opt.split(":") for opt in tqx.split(";"))

    version = options.get("version", "0.6")
    if version != "0.6":
        raise DataTableException(
            "Version (%s) passed by request is not supported." % version)

    out = options.get("out", "json")
    if out == "json":
        handler = options.get("responseHandler",
                              "google.visualization.Query.setResponse")
        return self.ToJSonResponse(columns_order, order_by,
                                   req_id=options.get("reqId", 0),
                                   response_handler=handler)
    if out == "html":
        return self.ToHtml(columns_order, order_by)
    if out == "csv":
        return self.ToCsv(columns_order, order_by)
    if out == "tsv-excel":
        return self.ToTsvExcel(columns_order, order_by)
    raise DataTableException(
        "'out' parameter: '%s' is not supported" % out)
|
google/google-visualization-python
|
gviz_api.py
|
DataTable._ToJSonObj
|
python
|
def _ToJSonObj(self, columns_order=None, order_by=()):
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
# Creating the column JSON objects
col_objs = []
for col_id in columns_order:
col_obj = {"id": col_dict[col_id]["id"],
"label": col_dict[col_id]["label"],
"type": col_dict[col_id]["type"]}
if col_dict[col_id]["custom_properties"]:
col_obj["p"] = col_dict[col_id]["custom_properties"]
col_objs.append(col_obj)
# Creating the rows jsons
row_objs = []
for row, cp in self._PreparedData(order_by):
cell_objs = []
for col in columns_order:
value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
if value is None:
cell_obj = None
elif isinstance(value, tuple):
cell_obj = {"v": value[0]}
if len(value) > 1 and value[1] is not None:
cell_obj["f"] = value[1]
if len(value) == 3:
cell_obj["p"] = value[2]
else:
cell_obj = {"v": value}
cell_objs.append(cell_obj)
row_obj = {"c": cell_objs}
if cp:
row_obj["p"] = cp
row_objs.append(row_obj)
json_obj = {"cols": col_objs, "rows": row_objs}
if self.custom_properties:
json_obj["p"] = self.custom_properties
return json_obj
|
Returns an object suitable to be converted to JSON.
Args:
columns_order: Optional. A list of all column IDs in the order in which
you want them created in the output table. If specified,
all column IDs must be present.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData().
Returns:
A dictionary object for use by ToJSon or ToJSonResponse.
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L913-L966
|
[
"def CoerceValue(value, value_type):\n \"\"\"Coerces a single value into the type expected for its column.\n\n Internal helper method.\n\n Args:\n value: The value which should be converted\n value_type: One of \"string\", \"number\", \"boolean\", \"date\", \"datetime\" or\n \"timeofday\".\n\n Returns:\n An item of the Python type appropriate to the given value_type. Strings\n are also converted to Unicode using UTF-8 encoding if necessary.\n If a tuple is given, it should be in one of the following forms:\n - (value, formatted value)\n - (value, formatted value, custom properties)\n where the formatted value is a string, and custom properties is a\n dictionary of the custom properties for this cell.\n To specify custom properties without specifying formatted value, one can\n pass None as the formatted value.\n One can also have a null-valued cell with formatted value and/or custom\n properties by specifying None for the value.\n This method ignores the custom properties except for checking that it is a\n dictionary. The custom properties are handled in the ToJSon and ToJSCode\n methods.\n The real type of the given value is not strictly checked. 
For example,\n any type can be used for string - as we simply take its str( ) and for\n boolean value we just check \"if value\".\n Examples:\n CoerceValue(None, \"string\") returns None\n CoerceValue((5, \"5$\"), \"number\") returns (5, \"5$\")\n CoerceValue(100, \"string\") returns \"100\"\n CoerceValue(0, \"boolean\") returns False\n\n Raises:\n DataTableException: The value and type did not match in a not-recoverable\n way, for example given value 'abc' for type 'number'.\n \"\"\"\n if isinstance(value, tuple):\n # In case of a tuple, we run the same function on the value itself and\n # add the formatted value.\n if (len(value) not in [2, 3] or\n (len(value) == 3 and not isinstance(value[2], dict))):\n raise DataTableException(\"Wrong format for value and formatting - %s.\" %\n str(value))\n if not isinstance(value[1], six.string_types + (type(None),)):\n raise DataTableException(\"Formatted value is not string, given %s.\" %\n type(value[1]))\n js_value = DataTable.CoerceValue(value[0], value_type)\n return (js_value,) + value[1:]\n\n t_value = type(value)\n if value is None:\n return value\n if value_type == \"boolean\":\n return bool(value)\n\n elif value_type == \"number\":\n if isinstance(value, six.integer_types + (float,)):\n return value\n raise DataTableException(\"Wrong type %s when expected number\" % t_value)\n\n elif value_type == \"string\":\n if isinstance(value, six.text_type):\n return value\n if isinstance(value, bytes):\n return six.text_type(value, encoding=\"utf-8\")\n else:\n return six.text_type(value)\n\n elif value_type == \"date\":\n if isinstance(value, datetime.datetime):\n return datetime.date(value.year, value.month, value.day)\n elif isinstance(value, datetime.date):\n return value\n else:\n raise DataTableException(\"Wrong type %s when expected date\" % t_value)\n\n elif value_type == \"timeofday\":\n if isinstance(value, datetime.datetime):\n return datetime.time(value.hour, value.minute, value.second)\n elif isinstance(value, 
datetime.time):\n return value\n else:\n raise DataTableException(\"Wrong type %s when expected time\" % t_value)\n\n elif value_type == \"datetime\":\n if isinstance(value, datetime.datetime):\n return value\n else:\n raise DataTableException(\"Wrong type %s when expected datetime\" %\n t_value)\n # If we got here, it means the given value_type was not one of the\n # supported types.\n raise DataTableException(\"Unsupported type %s\" % value_type)\n",
"def _PreparedData(self, order_by=()):\n \"\"\"Prepares the data for enumeration - sorting it by order_by.\n\n Args:\n order_by: Optional. Specifies the name of the column(s) to sort by, and\n (optionally) which direction to sort in. Default sort direction\n is asc. Following formats are accepted:\n \"string_col_name\" -- For a single key in default (asc) order.\n (\"string_col_name\", \"asc|desc\") -- For a single key.\n [(\"col_1\",\"asc|desc\"), (\"col_2\",\"asc|desc\")] -- For more than\n one column, an array of tuples of (col_name, \"asc|desc\").\n\n Returns:\n The data sorted by the keys given.\n\n Raises:\n DataTableException: Sort direction not in 'asc' or 'desc'\n \"\"\"\n if not order_by:\n return self.__data\n\n sorted_data = self.__data[:]\n if isinstance(order_by, six.string_types) or (\n isinstance(order_by, tuple) and len(order_by) == 2 and\n order_by[1].lower() in [\"asc\", \"desc\"]):\n order_by = (order_by,)\n for key in reversed(order_by):\n if isinstance(key, six.string_types):\n sorted_data.sort(key=lambda x: x[0].get(key))\n elif (isinstance(key, (list, tuple)) and len(key) == 2 and\n key[1].lower() in (\"asc\", \"desc\")):\n key_func = lambda x: x[0].get(key[0])\n sorted_data.sort(key=key_func, reverse=key[1].lower() != \"asc\")\n else:\n raise DataTableException(\"Expected tuple with second value: \"\n \"'asc' or 'desc'\")\n\n return sorted_data\n"
] |
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
    """Initialize the data table from a table schema and (optionally) data.

    See the class documentation for more information on table schema and
    data values.

    Args:
      table_description: A table schema, following one of the formats
          described in TableDescriptionParser().  Schemas describe the
          column names, data types, and labels.
      data: Optional. If given, fills the table with the given data; the
          structure must be consistent with the schema.  Rows can also be
          added later with AppendData().
      custom_properties: Optional. A string-to-string dictionary stored as
          the table's custom properties; may be changed later through
          self.custom_properties.

    Raises:
      DataTableException: The data and the description did not match, or
          used unsupported formats.
    """
    self.__columns = self.TableDescriptionParser(table_description)
    self.__data = []
    self.custom_properties = {} if custom_properties is None else custom_properties
    if data:
        self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
    """Coerces a single value into the type expected for its column.

    Internal helper method.

    Args:
      value: The value which should be converted
      value_type: One of "string", "number", "boolean", "date", "datetime"
          or "timeofday".

    Returns:
      An item of the Python type appropriate to the given value_type.
      Strings are also converted to Unicode using UTF-8 encoding if
      necessary.
      If a tuple is given, it should be in one of the following forms:
        - (value, formatted value)
        - (value, formatted value, custom properties)
      where the formatted value is a string, and custom properties is a
      dictionary of the custom properties for this cell.
      To specify custom properties without specifying formatted value, one
      can pass None as the formatted value.
      One can also have a null-valued cell with formatted value and/or
      custom properties by specifying None for the value.
      This method ignores the custom properties except for checking that it
      is a dictionary. The custom properties are handled in the ToJSon and
      ToJSCode methods.
      The real type of the given value is not strictly checked. For example,
      any type can be used for string - as we simply take its str( ) and for
      boolean value we just check "if value".

      Examples:
        CoerceValue(None, "string") returns None
        CoerceValue((5, "5$"), "number") returns (5, "5$")
        CoerceValue(100, "string") returns "100"
        CoerceValue(0, "boolean") returns False

    Raises:
      DataTableException: The value and type did not match in a
          not-recoverable way, for example given value 'abc' for type
          'number'.
    """
    if isinstance(value, tuple):
        # In case of a tuple, we run the same function on the value itself and
        # add the formatted value.
        if (len(value) not in [2, 3] or
            (len(value) == 3 and not isinstance(value[2], dict))):
            raise DataTableException("Wrong format for value and formatting - %s." %
                                     str(value))
        if not isinstance(value[1], six.string_types + (type(None),)):
            raise DataTableException("Formatted value is not string, given %s." %
                                     type(value[1]))
        js_value = DataTable.CoerceValue(value[0], value_type)
        return (js_value,) + value[1:]

    # Capture the type up-front so the error messages below can report it.
    t_value = type(value)
    # None is a valid "null" cell for every column type.
    if value is None:
        return value
    if value_type == "boolean":
        return bool(value)

    elif value_type == "number":
        if isinstance(value, six.integer_types + (float,)):
            return value
        raise DataTableException("Wrong type %s when expected number" % t_value)

    elif value_type == "string":
        if isinstance(value, six.text_type):
            return value
        if isinstance(value, bytes):
            # Byte strings are assumed to be UTF-8 encoded text.
            return six.text_type(value, encoding="utf-8")
        else:
            return six.text_type(value)

    elif value_type == "date":
        if isinstance(value, datetime.datetime):
            # A datetime is narrowed down to its date component.
            return datetime.date(value.year, value.month, value.day)
        elif isinstance(value, datetime.date):
            return value
        else:
            raise DataTableException("Wrong type %s when expected date" % t_value)

    elif value_type == "timeofday":
        if isinstance(value, datetime.datetime):
            # A datetime is narrowed down to its time component.
            return datetime.time(value.hour, value.minute, value.second)
        elif isinstance(value, datetime.time):
            return value
        else:
            raise DataTableException("Wrong type %s when expected time" % t_value)

    elif value_type == "datetime":
        if isinstance(value, datetime.datetime):
            return value
        else:
            raise DataTableException("Wrong type %s when expected datetime" %
                                     t_value)
    # If we got here, it means the given value_type was not one of the
    # supported types.
    raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
  """Renders a single cell value as a JavaScript literal.

  Dates and datetimes become ``new Date(...)`` constructor calls (with the
  month shifted down by one to match JavaScript's 0-based months), None maps
  to JS ``null``, and every other value is delegated to the JSON encoder.

  Args:
    encoder: A json.JSONEncoder-compatible object used for plain values.
    value: The (already coerced) cell value to render.

  Returns:
    A string holding a JavaScript expression for the value.
  """
  if value is None:
    return "null"
  if isinstance(value, datetime.datetime):
    # datetime must be tested before date: it is a subclass of date.
    parts = [value.year, value.month - 1, value.day,
             value.hour, value.minute, value.second]
    if value.microsecond:
      # Millisecond argument is only emitted when it carries information,
      # to keep the generated code short.
      parts.append(value.microsecond / 1000)
    return "new Date(%s)" % ",".join("%d" % part for part in parts)
  if isinstance(value, datetime.date):
    return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
  return encoder.encode(value)
@staticmethod
def ToString(value):
  """Returns a human-readable text rendering of a single cell value.

  None renders as "(empty)", booleans as lowercase JS-style "true"/"false",
  date/time values via str(), bytes are decoded as UTF-8, and everything
  else is converted with str(). Used by the CSV and HTML renderers.

  Args:
    value: The cell value to render.

  Returns:
    A str representation of the value.
  """
  # Fix: the original routed through the obsolete `six` py2-compat shims
  # (six.text_type); on Python 3 (which this file requires, e.g. via the
  # `html` module) they are exactly str / bytes.decode.
  if value is None:
    return "(empty)"
  elif isinstance(value, (datetime.datetime,
                          datetime.date,
                          datetime.time)):
    return str(value)
  elif isinstance(value, str):
    return value
  elif isinstance(value, bool):
    # Checked before the generic fallback so True/False come out as
    # "true"/"false" to match JavaScript, not "True"/"False".
    return str(value).lower()
  elif isinstance(value, bytes):
    return value.decode("utf-8")
  else:
    return str(value)
@staticmethod
def ColumnTypeParser(description):
  """Parses a single column description. Internal helper method.

  Args:
    description: a column description in the possible formats:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
  Returns:
    Dictionary with the following keys: id, label, type, and
    custom_properties where:
      - If label not given, it equals the id.
      - If type not given, string is used by default.
      - If custom properties are not given, an empty dictionary is used by
        default.

  Raises:
    DataTableException: The column description did not match the RE, or
        unsupported type was passed.
  """
  # Fix: replaced the obsolete `six.string_types` checks with plain `str`;
  # the file is Python-3-only (it uses the py3 `html` module).
  if not description:
    raise DataTableException("Description error: empty description given")

  if not isinstance(description, (str, tuple)):
    raise DataTableException("Description error: expected either string or "
                             "tuple, got %s." % type(description))

  if isinstance(description, str):
    description = (description,)

  # According to the tuple's length, we fill the keys
  # We verify everything is of type string
  for elem in description[:3]:
    if not isinstance(elem, str):
      raise DataTableException("Description error: expected tuple of "
                               "strings, current element of type %s." %
                               type(elem))
  desc_dict = {"id": description[0],
               "label": description[0],
               "type": "string",
               "custom_properties": {}}
  if len(description) > 1:
    desc_dict["type"] = description[1].lower()
    if len(description) > 2:
      desc_dict["label"] = description[2]
      if len(description) > 3:
        if not isinstance(description[3], dict):
          raise DataTableException("Description error: expected custom "
                                   "properties of type dict, current element "
                                   "of type %s." % type(description[3]))
        desc_dict["custom_properties"] = description[3]
        if len(description) > 4:
          raise DataTableException("Description error: tuple of length > 4")
  if desc_dict["type"] not in ["string", "number", "boolean",
                               "date", "datetime", "timeofday"]:
    raise DataTableException(
        "Description error: unsupported type '%s'" % desc_dict["type"])
  return desc_dict
@staticmethod
def TableDescriptionParser(table_description, depth=0):
  """Parses the table_description object for internal use.

  Parses the user-submitted table description into an internal format used
  by the Python DataTable class. Returns the flat list of parsed columns.

  Args:
    table_description: A description of the table which should comply
        with one of the formats described below.
    depth: Optional. The depth of the first level in the current description.
        Used by recursive calls to this function.

  Returns:
    List of columns, where each column represented by a dictionary with the
    keys: id, label, type, depth, container which means the following:
    - id: the id of the column
    - name: The name of the column
    - type: The datatype of the elements in this column. Allowed types are
            described in ColumnTypeParser().
    - depth: The depth of this column in the table description
    - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
    - custom_properties: The custom properties for this column.
    The returned description is flattened regardless of how it was given.

  Raises:
    DataTableException: Error in a column description or in the description
                        structure.

  Examples:
    A column description can be of the following forms:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
     or as a dictionary:
     'id': 'type'
     'id': ('type',)
     'id': ('type', 'label')
     'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
    If the type is not specified, we treat it as string.
    If no specific label is given, the label is simply the id.
    If no custom properties are given, we use an empty dictionary.

    input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
    output: [{'id': 'a', 'label': 'a', 'type': 'date',
              'depth': 0, 'container': 'iter', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'timeofday',
              'depth': 0, 'container': 'iter',
              'custom_properties': {'foo': 'bar'}}]

    input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
    output: [{'id': 'a', 'label': 'a', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'iter', 'custom_properties': {}},
             {'id': 'c', 'label': 'column c', 'type': 'string',
              'depth': 1, 'container': 'iter', 'custom_properties': {}}]

    input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'c', 'type': 'string',
              'depth': 1, 'container': 'dict', 'custom_properties': {}}]

    input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
    output: [{'id': 'w', 'label': 'word', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'count', 'type': 'number',
              'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

    input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
              'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
              'container': 'dict', 'custom_properties': {}}

    NOTE: there might be ambiguity in the case of a dictionary representation
    of a single column. For example, the following description can be parsed
    in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
    with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
    'a', and the other named 'b' of type 'c'. We choose the first option by
    default, and in case the second option is the right one, it is possible to
    make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
    into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
    -- second 'b' is the label, and {} is the custom properties field.
  """
  # Fix: removed the obsolete `six` helpers (six.string_types,
  # six.iterkeys, six.itervalues) in favor of native Python 3 equivalents;
  # behavior on Python 3 is unchanged.
  # For the recursion step, we check for a scalar object (string or tuple)
  if isinstance(table_description, (str, tuple)):
    parsed_col = DataTable.ColumnTypeParser(table_description)
    parsed_col["depth"] = depth
    parsed_col["container"] = "scalar"
    return [parsed_col]

  # Since it is not scalar, table_description must be iterable.
  if not hasattr(table_description, "__iter__"):
    raise DataTableException("Expected an iterable object, got %s" %
                             type(table_description))
  if not isinstance(table_description, dict):
    # We expects a non-dictionary iterable item.
    columns = []
    for desc in table_description:
      parsed_col = DataTable.ColumnTypeParser(desc)
      parsed_col["depth"] = depth
      parsed_col["container"] = "iter"
      columns.append(parsed_col)
    if not columns:
      raise DataTableException("Description iterable objects should not"
                               " be empty.")
    return columns
  # The other case is a dictionary
  if not table_description:
    raise DataTableException("Empty dictionaries are not allowed inside"
                             " description")

  # To differentiate between the two cases of more levels below or this is
  # the most inner dictionary, we consider the number of keys (more then one
  # key is indication for most inner dictionary) and the type of the key and
  # value in case of only 1 key (if the type of key is string and the type of
  # the value is a tuple of 0-3 items, we assume this is the most inner
  # dictionary).
  # NOTE: this way of differentiating might create ambiguity. See docs.
  if (len(table_description) != 1 or
      (isinstance(next(iter(table_description)), str) and
       isinstance(next(iter(table_description.values())), tuple) and
       len(next(iter(table_description.values()))) < 4)):
    # This is the most inner dictionary. Parsing types.
    columns = []
    # We sort the items, equivalent to sort the keys since they are unique
    for key, value in sorted(table_description.items()):
      # We parse the column type as (key, type) or (key, type, label) using
      # ColumnTypeParser.
      if isinstance(value, tuple):
        parsed_col = DataTable.ColumnTypeParser((key,) + value)
      else:
        parsed_col = DataTable.ColumnTypeParser((key, value))
      parsed_col["depth"] = depth
      parsed_col["container"] = "dict"
      columns.append(parsed_col)
    return columns
  # This is an outer dictionary, must have at most one key.
  parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
  parsed_col["depth"] = depth
  parsed_col["container"] = "dict"
  return ([parsed_col] + DataTable.TableDescriptionParser(
      sorted(table_description.values())[0], depth=depth + 1))
@property
def columns(self):
  """Returns the parsed table description (read-only).

  A flat list of column dicts as produced by TableDescriptionParser, each
  with keys: id, label, type, depth, container and custom_properties.
  """
  return self.__columns
def NumberOfRows(self):
  """Returns the number of rows in the current data stored in the table.

  Only data rows are counted; the column header is not included.
  """
  return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
  """Attaches the given custom properties to one or more existing rows.

  Accepts either a single row index or an iterable of row indices; the same
  custom-properties dictionary is stored on every listed row, replacing
  whatever dictionary the row had before.

  Args:
    rows: The row index, or iterable of row indices, to update.
    custom_properties: A string-to-string dictionary of custom properties.
  """
  if not hasattr(rows, "__iter__"):
    rows = [rows]
  for index in rows:
    # Each stored row is a (values, custom_properties) pair; keep the
    # values and swap in the new properties dictionary.
    values = self.__data[index][0]
    self.__data[index] = (values, custom_properties)
def LoadData(self, data, custom_properties=None):
  """Loads new rows to the data table, clearing existing rows.

  May also set the custom_properties for the added rows. The given custom
  properties dictionary specifies the dictionary that will be used for *all*
  given rows.

  Args:
    data: The rows that the table will contain. Must conform to the table
      schema passed to __init__().
    custom_properties: A dictionary of string to string to set as the custom
      properties for all rows.
  """
  # Drop all existing rows, then delegate validation and insertion of the
  # new ones to AppendData.
  self.__data = []
  self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
  """Appends new rows to the table without clearing the existing ones.

  The rows must conform to the table schema passed in to __init__(). See
  CoerceValue() for the accepted cell value types and the class docstring
  for schema/data examples.

  Args:
    data: The row(s) to add, in the shape implied by the table description.
    custom_properties: A string-to-string dictionary stored as the custom
      properties of every appended row.

  Raises:
    DataTableException: The data structure does not match the description.
  """
  nested = bool(self.__columns[-1]["depth"])
  if nested:
    # A nested description: _InnerAppendData walks all levels itself.
    self._InnerAppendData(({}, custom_properties), data, 0)
    return
  # Flat description (max depth 0): each element of `data` is one row.
  # A fresh accumulator dict is built per row because _InnerAppendData
  # mutates it in place.
  for row in data:
    self._InnerAppendData(({}, custom_properties), row, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
  """Recursive helper for AppendData: consumes one description level per call.

  Args:
    prev_col_values: Tuple of (values_dict, custom_properties) accumulated
      from the outer levels; values_dict is mutated in place before being
      appended to the table's row storage.
    data: The remaining data for this level of the table description.
    col_index: Index into self.__columns of the column this level fills.

  Raises:
    DataTableException: The data structure does not match the description.
  """
  # We first check that col_index has not exceeded the columns size
  if col_index >= len(self.__columns):
    raise DataTableException("The data does not match description, too deep")

  # Dealing with the scalar case, the data is the last value.
  if self.__columns[col_index]["container"] == "scalar":
    prev_col_values[0][self.__columns[col_index]["id"]] = data
    self.__data.append(prev_col_values)
    return

  if self.__columns[col_index]["container"] == "iter":
    if not hasattr(data, "__iter__") or isinstance(data, dict):
      raise DataTableException("Expected iterable object, got %s" %
                               type(data))
    # We only need to insert the rest of the columns
    # If there are less items than expected, we only add what there is.
    # Note col_index is bumped per consumed value, so the length check
    # inside the loop catches rows with too many elements.
    for value in data:
      if col_index >= len(self.__columns):
        raise DataTableException("Too many elements given in data")
      prev_col_values[0][self.__columns[col_index]["id"]] = value
      col_index += 1
    self.__data.append(prev_col_values)
    return

  # We know the current level is a dictionary, we verify the type.
  if not isinstance(data, dict):
    raise DataTableException("Expected dictionary at current level, got %s" %
                             type(data))
  # We check if this is the last level
  if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
    # We need to add the keys in the dictionary as they are
    for col in self.__columns[col_index:]:
      if col["id"] in data:
        prev_col_values[0][col["id"]] = data[col["id"]]
    self.__data.append(prev_col_values)
    return

  # We have a dictionary in an inner depth level.
  if not data.keys():
    # In case this is an empty dictionary, we add a record with the columns
    # filled only until this point.
    self.__data.append(prev_col_values)
  else:
    # Keys are sorted so row order is deterministic; each key becomes this
    # level's column value and recursion fills the deeper columns. The
    # accumulator dict is copied per key so sibling branches stay isolated.
    for key in sorted(data):
      col_values = dict(prev_col_values[0])
      col_values[self.__columns[col_index]["id"]] = key
      self._InnerAppendData((col_values, prev_col_values[1]),
                            data[key], col_index + 1)
def _PreparedData(self, order_by=()):
  """Prepares the data for enumeration - sorting it by order_by.

  Args:
    order_by: Optional. Specifies the name of the column(s) to sort by, and
              (optionally) which direction to sort in. Default sort direction
              is asc. Following formats are accepted:
              "string_col_name" -- For a single key in default (asc) order.
              ("string_col_name", "asc|desc") -- For a single key.
              [("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
                  one column, an array of tuples of (col_name, "asc|desc").

  Returns:
    The data sorted by the keys given. The underlying row list is copied,
    so the table's own storage is never reordered.

  Raises:
    DataTableException: Sort direction not in 'asc' or 'desc'
  """
  # Fix: replaced `six.string_types` with plain `str` (file is py3-only)
  # and the PEP 8 E731 lambda-assignment with inline key functions. The
  # `k=...` default arguments bind the loop variable eagerly, which is
  # the robust idiom for closures created in a loop (behavior unchanged:
  # the sort ran immediately in each iteration anyway).
  if not order_by:
    return self.__data

  sorted_data = self.__data[:]
  if isinstance(order_by, str) or (
      isinstance(order_by, tuple) and len(order_by) == 2 and
      order_by[1].lower() in ["asc", "desc"]):
    order_by = (order_by,)
  # Sorting by the keys in reverse order yields a stable multi-key sort.
  for key in reversed(order_by):
    if isinstance(key, str):
      sorted_data.sort(key=lambda x, k=key: x[0].get(k))
    elif (isinstance(key, (list, tuple)) and len(key) == 2 and
          key[1].lower() in ("asc", "desc")):
      sorted_data.sort(key=lambda x, k=key[0]: x[0].get(k),
                       reverse=key[1].lower() != "asc")
    else:
      raise DataTableException("Expected tuple with second value: "
                               "'asc' or 'desc'")
  return sorted_data
def ToJSCode(self, name, columns_order=None, order_by=()):
  """Writes the data table as a JS code string.

  This method writes a string of JS code that can be run to
  generate a DataTable with the specified data. Typically used for debugging
  only.

  Args:
    name: The name of the table. The name would be used as the DataTable's
          variable name in the created JS code.
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the order
                   in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.

  Returns:
    A string of JS code that, when run, generates a DataTable with the given
    name and the data stored in the DataTable object.

  Example result:
    "var tab1 = new google.visualization.DataTable();
     tab1.addColumn("string", "a", "a");
     tab1.addColumn("number", "b", "b");
     tab1.addColumn("boolean", "c", "c");
     tab1.addRows(10);
     tab1.setCell(0, 0, "a");
     tab1.setCell(0, 1, 1, null, {"foo": "bar"});
     tab1.setCell(0, 2, true);
     ...
     tab1.setCell(9, 0, "c");
     tab1.setCell(9, 1, 3, "3$");
     tab1.setCell(9, 2, false);"

  Raises:
    DataTableException: The data does not match the type.
  """
  encoder = DataTableJSONEncoder()

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # We first create the table with the given name
  jscode = "var %s = new google.visualization.DataTable();\n" % name
  if self.custom_properties:
    jscode += "%s.setTableProperties(%s);\n" % (
        name, encoder.encode(self.custom_properties))

  # We add the columns to the table
  for i, col in enumerate(columns_order):
    jscode += "%s.addColumn(%s, %s, %s);\n" % (
        name,
        encoder.encode(col_dict[col]["type"]),
        encoder.encode(col_dict[col]["label"]),
        encoder.encode(col_dict[col]["id"]))
    if col_dict[col]["custom_properties"]:
      jscode += "%s.setColumnProperties(%d, %s);\n" % (
          name, i, encoder.encode(col_dict[col]["custom_properties"]))
  jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

  # We now go over the data and add each row
  for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
    # We add all the elements of this row by their order
    for (j, col) in enumerate(columns_order):
      # Missing/None cells are skipped; addRows already created them null.
      if col not in row or row[col] is None:
        continue
      value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # A 2/3-tuple carries (value, formatted[, cell custom properties]).
        cell_cp = ""
        if len(value) == 3:
          cell_cp = ", %s" % encoder.encode(row[col][2])
        # We have a formatted value or custom property as well
        jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                   (name, i, j,
                    self.EscapeForJSCode(encoder, value[0]),
                    self.EscapeForJSCode(encoder, value[1]), cell_cp))
      else:
        jscode += "%s.setCell(%d, %d, %s);\n" % (
            name, i, j, self.EscapeForJSCode(encoder, value))
    if cp:
      jscode += "%s.setRowProperties(%d, %s);\n" % (
          name, i, encoder.encode(cp))
  return jscode
def ToHtml(self, columns_order=None, order_by=()):
  """Renders the table as a single-line HTML document string.

  Args:
    columns_order: Optional. A list of every column ID, giving the column
        order of the output table. If used, all column IDs must be listed.
    order_by: Optional. Sort specification, forwarded as-is to
        _PreparedData.

  Returns:
    An HTML string of the form
    <html><body><table border="1">...</table></body></html> with a <thead>
    of escaped column labels and one <tbody> row per data row.

  Raises:
    DataTableException: The data does not match the column types.
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Header: one <th> per column, labels HTML-escaped.
  header_html = "<thead><tr>%s</tr></thead>" % "".join(
      "<th>%s</th>" % html.escape(col_dict[col]["label"])
      for col in columns_order)

  body_rows = []
  for row, _unused_cp in self._PreparedData(order_by):
    cells = []
    for col in columns_order:
      # Absent/None cells render as an empty string.
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      # A tuple carries (value, formatted[, props]); show the formatted one.
      shown = value[1] if isinstance(value, tuple) else value
      cells.append("<td>%s</td>" % html.escape(self.ToString(shown)))
    body_rows.append("<tr>%s</tr>" % "".join(cells))
  body_html = "<tbody>%s</tbody>" % "".join(body_rows)

  return ("<html><body><table border=\"1\">%s</table></body></html>"
          % (header_html + body_html))
def ToCsv(self, columns_order=None, order_by=(), separator=","):
  """Writes the data table as a CSV string.

  Args:
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the order
                   in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.
    separator: Optional. The separator to use between the values.

  Returns:
    A CSV string representing the table. (Plain str; encode it yourself if
    a byte stream is needed.)

  Example result:
   'a','b','c'
   1,'z',2
   3,'w',''

  Raises:
    DataTableException: The data does not match the type.
  """
  # Fixes: six.StringIO replaced by io.StringIO, and the dead `ensure_str`
  # py2 shim removed -- on Python 3 ColumnTypeParser labels and ToString
  # results are always str, so it was an identity function.
  import io  # Local import keeps this fix self-contained in the method.

  csv_buffer = io.StringIO()
  writer = csv.writer(csv_buffer, delimiter=separator)

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  writer.writerow([col_dict[col]["label"] for col in columns_order])

  # We now go over the data and add each row
  for row, unused_cp in self._PreparedData(order_by):
    cells_list = []
    # We add all the elements of this row by their order
    for col in columns_order:
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # We have a formatted value. Using it only for date/time types.
        if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
          cells_list.append(self.ToString(value[1]))
        else:
          cells_list.append(self.ToString(value[0]))
      else:
        cells_list.append(self.ToString(value))
    writer.writerow(cells_list)
  return csv_buffer.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
  """Returns a file in tab-separated-format readable by MS Excel.

  Returns a file in UTF-16 little endian encoding, with tabs separating the
  values.

  Args:
    columns_order: Delegated to ToCsv.
    order_by: Delegated to ToCsv.

  Returns:
    A tab-separated little endian UTF16 bytes object representing the table.
  """
  # Fix: `six.text_type` replaced by `str` (py3-only file). The bytes
  # branch is kept as a defensive guard for any legacy ToCsv override
  # that still returns UTF-8 bytes.
  csv_result = self.ToCsv(columns_order, order_by, separator="\t")
  if not isinstance(csv_result, str):
    csv_result = csv_result.decode("utf-8")
  return csv_result.encode("UTF-16LE")
def ToJSon(self, columns_order=None, order_by=()):
  """Returns a string that can be used in a JS DataTable constructor.

  This method writes a JSON string that can be passed directly into a Google
  Visualization API DataTable constructor. Use this output if you are
  hosting the visualization HTML on your site, and want to code the data
  table in Python. Pass this string into the
  google.visualization.DataTable constructor, e.g,:
    ... on my page that hosts my visualization ...
    google.setOnLoadCallback(drawTable);
    function drawTable() {
      var data = new google.visualization.DataTable(_my_JSon_string, 0.6);
      myTable.draw(data);
    }

  Args:
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the order
                   in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData().

  Returns:
    A JSon constructor string to generate a JS DataTable with the data
    stored in the DataTable object.

  Example result (the result is without the newlines):
   {cols: [{id:"a",label:"a",type:"number"},
           {id:"b",label:"b",type:"string"},
          {id:"c",label:"c",type:"number"}],
    rows: [{c:[{v:1},{v:"z"},{v:2}]}, c:{[{v:3,f:"3$"},{v:"w"},null]}],
    p:    {'foo': 'bar'}}

  Raises:
    DataTableException: The data does not match the type.
  """
  # Build the JSON-ready dict once, then serialize it.
  encoded_response_str = DataTableJSONEncoder().encode(self._ToJSonObj(columns_order, order_by))
  if not isinstance(encoded_response_str, str):
    # json.JSONEncoder.encode returns str on Python 3; this branch only
    # fires on legacy Python 2, where the unicode result is UTF-8 encoded.
    return encoded_response_str.encode("utf-8")
  return encoded_response_str
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
  """Serializes the table as a complete Google Visualization query response.

  Builds the JSON payload for a Google Visualization API query and wraps it
  in a call to the given JS response handler, so the returned string can be
  served as-is to a client page hosting a visualization.

  Args:
    columns_order: Optional. Passed straight to self.ToJSon().
    order_by: Optional. Passed straight to self.ToJSon().
    req_id: Optional. The response id, as retrieved by the request.
    response_handler: Optional. The response handler, as retrieved by the
        request.

  Returns:
    A string of the form
    google.visualization.Query.setResponse({'version':'0.6', 'reqId':...,
    'status':..., 'table': {cols: [...], rows: [...]}});
    which the client-side JS Query object turns back into a DataTable.
  """
  payload = {
      "version": "0.6",
      "reqId": str(req_id),
      "table": self._ToJSonObj(columns_order, order_by),
      "status": "ok",
  }
  encoded = DataTableJSONEncoder().encode(payload)
  if not isinstance(encoded, str):
    encoded = encoded.encode("utf-8")
  return "%s(%s);" % (response_handler, encoded)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
  """Writes the right response according to the request string passed in tqx.

  This method parses the tqx request string (format of which is defined in
  the documentation for implementing a data source of Google Visualization),
  and returns the right response according to the request.
  It parses out the "out" parameter of tqx, calls the relevant response
  (ToJSonResponse() for "json", ToCsv() for "csv", ToHtml() for "html",
  ToTsvExcel() for "tsv-excel") and passes the response function the rest of
  the relevant request keys.

  Args:
    columns_order: Optional. Passed as is to the relevant response function.
    order_by: Optional. Passed as is to the relevant response function.
    tqx: Optional. The request string as received by HTTP GET. Should be in
         the format "key1:value1;key2:value2...". All keys have a default
         value, so an empty string will just do the default (which is calling
         ToJSonResponse() with no extra parameters).

  Returns:
    A response string, as returned by the relevant response function.

  Raises:
    DataTableException: One of the parameters passed in tqx is not supported.
  """
  tqx_dict = {}
  if tqx:
    # Fix: split on the first ":" only, so option values may themselves
    # contain colons (the old split(":") raised on such values).
    tqx_dict = dict(opt.split(":", 1) for opt in tqx.split(";"))
  if tqx_dict.get("version", "0.6") != "0.6":
    raise DataTableException(
        "Version (%s) passed by request is not supported."
        % tqx_dict["version"])

  out = tqx_dict.get("out", "json")
  if out == "json":
    response_handler = tqx_dict.get("responseHandler",
                                    "google.visualization.Query.setResponse")
    return self.ToJSonResponse(columns_order, order_by,
                               req_id=tqx_dict.get("reqId", 0),
                               response_handler=response_handler)
  elif out == "html":
    return self.ToHtml(columns_order, order_by)
  elif out == "csv":
    return self.ToCsv(columns_order, order_by)
  elif out == "tsv-excel":
    return self.ToTsvExcel(columns_order, order_by)
  else:
    raise DataTableException(
        "'out' parameter: '%s' is not supported" % out)
|
google/google-visualization-python
|
gviz_api.py
|
DataTable.ToJSon
|
python
|
def ToJSon(self, columns_order=None, order_by=()):
encoded_response_str = DataTableJSONEncoder().encode(self._ToJSonObj(columns_order, order_by))
if not isinstance(encoded_response_str, str):
return encoded_response_str.encode("utf-8")
return encoded_response_str
|
Returns a string that can be used in a JS DataTable constructor.
This method writes a JSON string that can be passed directly into a Google
Visualization API DataTable constructor. Use this output if you are
hosting the visualization HTML on your site, and want to code the data
table in Python. Pass this string into the
google.visualization.DataTable constructor, e.g,:
... on my page that hosts my visualization ...
google.setOnLoadCallback(drawTable);
function drawTable() {
var data = new google.visualization.DataTable(_my_JSon_string, 0.6);
myTable.draw(data);
}
Args:
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData().
Returns:
A JSon constructor string to generate a JS DataTable with the data
stored in the DataTable object.
Example result (the result is without the newlines):
{cols: [{id:"a",label:"a",type:"number"},
{id:"b",label:"b",type:"string"},
{id:"c",label:"c",type:"number"}],
rows: [{c:[{v:1},{v:"z"},{v:2}]}, c:{[{v:3,f:"3$"},{v:"w"},null]}],
p: {'foo': 'bar'}}
Raises:
DataTableException: The data does not match the type.
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L968-L1009
|
[
"def _ToJSonObj(self, columns_order=None, order_by=()):\n \"\"\"Returns an object suitable to be converted to JSON.\n\n Args:\n columns_order: Optional. A list of all column IDs in the order in which\n you want them created in the output table. If specified,\n all column IDs must be present.\n order_by: Optional. Specifies the name of the column(s) to sort by.\n Passed as is to _PreparedData().\n\n Returns:\n A dictionary object for use by ToJSon or ToJSonResponse.\n \"\"\"\n if columns_order is None:\n columns_order = [col[\"id\"] for col in self.__columns]\n col_dict = dict([(col[\"id\"], col) for col in self.__columns])\n\n # Creating the column JSON objects\n col_objs = []\n for col_id in columns_order:\n col_obj = {\"id\": col_dict[col_id][\"id\"],\n \"label\": col_dict[col_id][\"label\"],\n \"type\": col_dict[col_id][\"type\"]}\n if col_dict[col_id][\"custom_properties\"]:\n col_obj[\"p\"] = col_dict[col_id][\"custom_properties\"]\n col_objs.append(col_obj)\n\n # Creating the rows jsons\n row_objs = []\n for row, cp in self._PreparedData(order_by):\n cell_objs = []\n for col in columns_order:\n value = self.CoerceValue(row.get(col, None), col_dict[col][\"type\"])\n if value is None:\n cell_obj = None\n elif isinstance(value, tuple):\n cell_obj = {\"v\": value[0]}\n if len(value) > 1 and value[1] is not None:\n cell_obj[\"f\"] = value[1]\n if len(value) == 3:\n cell_obj[\"p\"] = value[2]\n else:\n cell_obj = {\"v\": value}\n cell_objs.append(cell_obj)\n row_obj = {\"c\": cell_objs}\n if cp:\n row_obj[\"p\"] = cp\n row_objs.append(row_obj)\n\n json_obj = {\"cols\": col_objs, \"rows\": row_objs}\n if self.custom_properties:\n json_obj[\"p\"] = self.custom_properties\n\n return json_obj\n"
] |
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
    """Builds a DataTable from a schema and, optionally, initial rows.

    Args:
      table_description: A table schema in any of the formats accepted by
          TableDescriptionParser() (column ids, types, labels and custom
          properties). See the class documentation for details.
      data: Optional initial rows matching the schema; forwarded to
          LoadData(). More rows can be added later with AppendData().
      custom_properties: Optional string-to-string dictionary stored as the
          table-level custom properties (also reachable later through
          self.custom_properties).

    Raises:
      DataTableException: If the description or the data is malformed or
          uses an unsupported format.
    """
    self.__columns = self.TableDescriptionParser(table_description)
    self.__data = []
    if custom_properties is None:
        custom_properties = {}
    self.custom_properties = custom_properties
    if data:
        self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
"""Coerces a single value into the type expected for its column.
Internal helper method.
Args:
value: The value which should be converted
value_type: One of "string", "number", "boolean", "date", "datetime" or
"timeofday".
Returns:
An item of the Python type appropriate to the given value_type. Strings
are also converted to Unicode using UTF-8 encoding if necessary.
If a tuple is given, it should be in one of the following forms:
- (value, formatted value)
- (value, formatted value, custom properties)
where the formatted value is a string, and custom properties is a
dictionary of the custom properties for this cell.
To specify custom properties without specifying formatted value, one can
pass None as the formatted value.
One can also have a null-valued cell with formatted value and/or custom
properties by specifying None for the value.
This method ignores the custom properties except for checking that it is a
dictionary. The custom properties are handled in the ToJSon and ToJSCode
methods.
The real type of the given value is not strictly checked. For example,
any type can be used for string - as we simply take its str( ) and for
boolean value we just check "if value".
Examples:
CoerceValue(None, "string") returns None
CoerceValue((5, "5$"), "number") returns (5, "5$")
CoerceValue(100, "string") returns "100"
CoerceValue(0, "boolean") returns False
Raises:
DataTableException: The value and type did not match in a not-recoverable
way, for example given value 'abc' for type 'number'.
"""
if isinstance(value, tuple):
# In case of a tuple, we run the same function on the value itself and
# add the formatted value.
if (len(value) not in [2, 3] or
(len(value) == 3 and not isinstance(value[2], dict))):
raise DataTableException("Wrong format for value and formatting - %s." %
str(value))
if not isinstance(value[1], six.string_types + (type(None),)):
raise DataTableException("Formatted value is not string, given %s." %
type(value[1]))
js_value = DataTable.CoerceValue(value[0], value_type)
return (js_value,) + value[1:]
t_value = type(value)
if value is None:
return value
if value_type == "boolean":
return bool(value)
elif value_type == "number":
if isinstance(value, six.integer_types + (float,)):
return value
raise DataTableException("Wrong type %s when expected number" % t_value)
elif value_type == "string":
if isinstance(value, six.text_type):
return value
if isinstance(value, bytes):
return six.text_type(value, encoding="utf-8")
else:
return six.text_type(value)
elif value_type == "date":
if isinstance(value, datetime.datetime):
return datetime.date(value.year, value.month, value.day)
elif isinstance(value, datetime.date):
return value
else:
raise DataTableException("Wrong type %s when expected date" % t_value)
elif value_type == "timeofday":
if isinstance(value, datetime.datetime):
return datetime.time(value.hour, value.minute, value.second)
elif isinstance(value, datetime.time):
return value
else:
raise DataTableException("Wrong type %s when expected time" % t_value)
elif value_type == "datetime":
if isinstance(value, datetime.datetime):
return value
else:
raise DataTableException("Wrong type %s when expected datetime" %
t_value)
# If we got here, it means the given value_type was not one of the
# supported types.
raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
if value is None:
return "(empty)"
elif isinstance(value, (datetime.datetime,
datetime.date,
datetime.time)):
return str(value)
elif isinstance(value, six.text_type):
return value
elif isinstance(value, bool):
return str(value).lower()
elif isinstance(value, bytes):
return six.text_type(value, encoding="utf-8")
else:
return six.text_type(value)
@staticmethod
def ColumnTypeParser(description):
    """Normalizes a single column description into a dictionary.

    Internal helper method. Accepts 'id' or a tuple
    ('id'[, 'type'[, 'label'[, custom_properties]]]) and fills in the
    defaults: type "string", label equal to the id, and an empty
    custom-properties dictionary.

    Returns:
      A dict with the keys "id", "label", "type" and "custom_properties".

    Raises:
      DataTableException: The description is empty/malformed, or names an
          unsupported column type.
    """
    if not description:
        raise DataTableException("Description error: empty description given")
    if not isinstance(description, (six.string_types, tuple)):
        raise DataTableException("Description error: expected either string or "
                                 "tuple, got %s." % type(description))
    if isinstance(description, six.string_types):
        description = (description,)
    # The first three entries (id, type, label) must all be strings.
    for part in description[:3]:
        if not isinstance(part, six.string_types):
            raise DataTableException("Description error: expected tuple of "
                                     "strings, current element of type %s." %
                                     type(part))
    parsed = {"id": description[0],
              "label": description[0],
              "type": "string",
              "custom_properties": {}}
    if len(description) > 1:
        parsed["type"] = description[1].lower()
    if len(description) > 2:
        parsed["label"] = description[2]
    if len(description) > 3:
        if not isinstance(description[3], dict):
            raise DataTableException("Description error: expected custom "
                                     "properties of type dict, current element "
                                     "of type %s." % type(description[3]))
        parsed["custom_properties"] = description[3]
    if len(description) > 4:
        raise DataTableException("Description error: tuple of length > 4")
    if parsed["type"] not in ["string", "number", "boolean",
                              "date", "datetime", "timeofday"]:
        raise DataTableException(
            "Description error: unsupported type '%s'" % parsed["type"])
    return parsed
@staticmethod
def TableDescriptionParser(table_description, depth=0):
    """Parses the table_description object for internal use.

    Parses the user-submitted table description into an internal format used
    by the Python DataTable class. Returns the flat list of parsed columns.

    Args:
      table_description: A description of the table which should comply
          with one of the formats described below.
      depth: Optional. The depth of the first level in the current
          description. Used by recursive calls to this function.

    Returns:
      List of columns, where each column is represented by a dictionary with
      the keys: id, label, type, depth, container, custom_properties:
        - id: the id of the column
        - label: the display name of the column
        - type: The datatype of the elements in this column. Allowed types
          are described in ColumnTypeParser().
        - depth: The depth of this column in the table description
        - container: 'dict', 'iter' or 'scalar' for parsing the format
          easily.
        - custom_properties: The custom properties for this column.
      The returned description is flattened regardless of how it was given.

    Raises:
      DataTableException: Error in a column description or in the
          description structure.

    Examples:
      A column description can be of the following forms:
       'id'
       ('id',)
       ('id', 'type')
       ('id', 'type', 'label')
       ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
      or as a dictionary:
       'id': 'type'
       'id': ('type',)
       'id': ('type', 'label')
       'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
      If the type is not specified, we treat it as string.
      If no specific label is given, the label is simply the id.
      If no custom properties are given, we use an empty dictionary.

      input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
      output: [{'id': 'a', 'label': 'a', 'type': 'date',
                'depth': 0, 'container': 'iter', 'custom_properties': {}},
               {'id': 'b', 'label': 'b', 'type': 'timeofday',
                'depth': 0, 'container': 'iter',
                'custom_properties': {'foo': 'bar'}}]

      input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
      output: [{'id': 'a', 'label': 'a', 'type': 'string',
                'depth': 0, 'container': 'dict', 'custom_properties': {}},
               {'id': 'b', 'label': 'b', 'type': 'number',
                'depth': 1, 'container': 'iter', 'custom_properties': {}},
               {'id': 'c', 'label': 'column c', 'type': 'string',
                'depth': 1, 'container': 'iter', 'custom_properties': {}}]

      input: {('a', 'number', 'column a'): {'b': 'number', 'c': 'string'}}
      output: [{'id': 'a', 'label': 'column a', 'type': 'number',
                'depth': 0, 'container': 'dict', 'custom_properties': {}},
               {'id': 'b', 'label': 'b', 'type': 'number',
                'depth': 1, 'container': 'dict', 'custom_properties': {}},
               {'id': 'c', 'label': 'c', 'type': 'string',
                'depth': 1, 'container': 'dict', 'custom_properties': {}}]

      input: {('w', 'string', 'word'): ('c', 'number', 'count')}
      output: [{'id': 'w', 'label': 'word', 'type': 'string',
                'depth': 0, 'container': 'dict', 'custom_properties': {}},
               {'id': 'c', 'label': 'count', 'type': 'number',
                'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

      input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
      output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
                'container': 'dict', 'custom_properties': {}},
               {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
                'container': 'dict', 'custom_properties': {}}]

    NOTE: there might be ambiguity in the case of a dictionary representation
    of a single column. For example, the following description can be parsed
    in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
    with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
    'a', and the other named 'b' of type 'c'. We choose the first option by
    default, and in case the second option is the right one, it is possible to
    make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
    into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
    -- second 'b' is the label, and {} is the custom properties field.
    """
    # For the recursion step, we check for a scalar object (string or tuple)
    if isinstance(table_description, (six.string_types, tuple)):
        parsed_col = DataTable.ColumnTypeParser(table_description)
        parsed_col["depth"] = depth
        parsed_col["container"] = "scalar"
        return [parsed_col]
    # Since it is not scalar, table_description must be iterable.
    if not hasattr(table_description, "__iter__"):
        raise DataTableException("Expected an iterable object, got %s" %
                                 type(table_description))
    if not isinstance(table_description, dict):
        # We expects a non-dictionary iterable item: every element is one
        # column description at this depth.
        columns = []
        for desc in table_description:
            parsed_col = DataTable.ColumnTypeParser(desc)
            parsed_col["depth"] = depth
            parsed_col["container"] = "iter"
            columns.append(parsed_col)
        if not columns:
            raise DataTableException("Description iterable objects should not"
                                     " be empty.")
        return columns
    # The other case is a dictionary
    if not table_description:
        raise DataTableException("Empty dictionaries are not allowed inside"
                                 " description")
    # To differentiate between the two cases of more levels below or this is
    # the most inner dictionary, we consider the number of keys (more then one
    # key is indication for most inner dictionary) and the type of the key and
    # value in case of only 1 key (if the type of key is string and the type of
    # the value is a tuple of 0-3 items, we assume this is the most inner
    # dictionary).
    # NOTE: this way of differentiating might create ambiguity. See docs.
    if (len(table_description) != 1 or
        (isinstance(next(six.iterkeys(table_description)), six.string_types) and
         isinstance(next(six.itervalues(table_description)), tuple) and
         len(next(six.itervalues(table_description))) < 4)):
        # This is the most inner dictionary. Parsing types.
        columns = []
        # We sort the items, equivalent to sort the keys since they are unique
        for key, value in sorted(table_description.items()):
            # We parse the column type as (key, type) or (key, type, label)
            # using ColumnTypeParser.
            if isinstance(value, tuple):
                parsed_col = DataTable.ColumnTypeParser((key,) + value)
            else:
                parsed_col = DataTable.ColumnTypeParser((key, value))
            parsed_col["depth"] = depth
            parsed_col["container"] = "dict"
            columns.append(parsed_col)
        return columns
    # This is an outer dictionary, must have at most one key: that key is a
    # column at this depth, and its value describes the deeper levels.
    parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
    parsed_col["depth"] = depth
    parsed_col["container"] = "dict"
    return ([parsed_col] + DataTable.TableDescriptionParser(
        sorted(table_description.values())[0], depth=depth + 1))
@property
def columns(self):
"""Returns the parsed table description."""
return self.__columns
def NumberOfRows(self):
"""Returns the number of rows in the current data stored in the table."""
return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
"""Sets the custom properties for given row(s).
Can accept a single row or an iterable of rows.
Sets the given custom properties for all specified rows.
Args:
rows: The row, or rows, to set the custom properties for.
custom_properties: A string to string dictionary of custom properties to
set for all rows.
"""
if not hasattr(rows, "__iter__"):
rows = [rows]
for row in rows:
self.__data[row] = (self.__data[row][0], custom_properties)
def LoadData(self, data, custom_properties=None):
"""Loads new rows to the data table, clearing existing rows.
May also set the custom_properties for the added rows. The given custom
properties dictionary specifies the dictionary that will be used for *all*
given rows.
Args:
data: The rows that the table will contain.
custom_properties: A dictionary of string to string to set as the custom
properties for all rows.
"""
self.__data = []
self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
"""Appends new data to the table.
Data is appended in rows. Data must comply with
the table schema passed in to __init__(). See CoerceValue() for a list
of acceptable data types. See the class documentation for more information
and examples of schema and data values.
Args:
data: The row to add to the table. The data must conform to the table
description format.
custom_properties: A dictionary of string to string, representing the
custom properties to add to all the rows.
Raises:
DataTableException: The data structure does not match the description.
"""
# If the maximal depth is 0, we simply iterate over the data table
# lines and insert them using _InnerAppendData. Otherwise, we simply
# let the _InnerAppendData handle all the levels.
if not self.__columns[-1]["depth"]:
for row in data:
self._InnerAppendData(({}, custom_properties), row, 0)
else:
self._InnerAppendData(({}, custom_properties), data, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
    """Inner function to assist LoadData.

    Recursively walks one (possibly nested) row of data, accumulating cell
    values into prev_col_values -- a (values_dict, custom_properties)
    tuple -- and appending the finished tuple to self.__data at each leaf.
    """
    # We first check that col_index has not exceeded the columns size
    if col_index >= len(self.__columns):
        raise DataTableException("The data does not match description, too deep")
    # Dealing with the scalar case, the data is the last value.
    if self.__columns[col_index]["container"] == "scalar":
        prev_col_values[0][self.__columns[col_index]["id"]] = data
        self.__data.append(prev_col_values)
        return
    if self.__columns[col_index]["container"] == "iter":
        # Dicts are iterable too, but belong to the "dict" container case.
        if not hasattr(data, "__iter__") or isinstance(data, dict):
            raise DataTableException("Expected iterable object, got %s" %
                                     type(data))
        # We only need to insert the rest of the columns
        # If there are less items than expected, we only add what there is.
        for value in data:
            if col_index >= len(self.__columns):
                raise DataTableException("Too many elements given in data")
            prev_col_values[0][self.__columns[col_index]["id"]] = value
            col_index += 1
        self.__data.append(prev_col_values)
        return
    # We know the current level is a dictionary, we verify the type.
    if not isinstance(data, dict):
        raise DataTableException("Expected dictionary at current level, got %s" %
                                 type(data))
    # We check if this is the last level
    if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
        # We need to add the keys in the dictionary as they are
        for col in self.__columns[col_index:]:
            if col["id"] in data:
                prev_col_values[0][col["id"]] = data[col["id"]]
        self.__data.append(prev_col_values)
        return
    # We have a dictionary in an inner depth level.
    if not data.keys():
        # In case this is an empty dictionary, we add a record with the columns
        # filled only until this point.
        self.__data.append(prev_col_values)
    else:
        for key in sorted(data):
            # Copy the accumulated values so each sibling branch produces its
            # own row; the custom-properties slot is shared deliberately.
            col_values = dict(prev_col_values[0])
            col_values[self.__columns[col_index]["id"]] = key
            self._InnerAppendData((col_values, prev_col_values[1]),
                                  data[key], col_index + 1)
def _PreparedData(self, order_by=()):
"""Prepares the data for enumeration - sorting it by order_by.
Args:
order_by: Optional. Specifies the name of the column(s) to sort by, and
(optionally) which direction to sort in. Default sort direction
is asc. Following formats are accepted:
"string_col_name" -- For a single key in default (asc) order.
("string_col_name", "asc|desc") -- For a single key.
[("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
one column, an array of tuples of (col_name, "asc|desc").
Returns:
The data sorted by the keys given.
Raises:
DataTableException: Sort direction not in 'asc' or 'desc'
"""
if not order_by:
return self.__data
sorted_data = self.__data[:]
if isinstance(order_by, six.string_types) or (
isinstance(order_by, tuple) and len(order_by) == 2 and
order_by[1].lower() in ["asc", "desc"]):
order_by = (order_by,)
for key in reversed(order_by):
if isinstance(key, six.string_types):
sorted_data.sort(key=lambda x: x[0].get(key))
elif (isinstance(key, (list, tuple)) and len(key) == 2 and
key[1].lower() in ("asc", "desc")):
key_func = lambda x: x[0].get(key[0])
sorted_data.sort(key=key_func, reverse=key[1].lower() != "asc")
else:
raise DataTableException("Expected tuple with second value: "
"'asc' or 'desc'")
return sorted_data
def ToJSCode(self, name, columns_order=None, order_by=()):
    """Writes the data table as a JS code string.

    This method writes a string of JS code that can be run to
    generate a DataTable with the specified data. Typically used for
    debugging only.

    Args:
      name: The name of the table. The name would be used as the DataTable's
          variable name in the created JS code.
      columns_order: Optional. Specifies the order of columns in the
          output table. Specify a list of all column IDs in the order
          in which you want the table created.
          Note that you must list all column IDs in this parameter,
          if you use it.
      order_by: Optional. Specifies the name of the column(s) to sort by.
          Passed as is to _PreparedData.

    Returns:
      A string of JS code that, when run, generates a DataTable with the
      given name and the data stored in the DataTable object.

      Example result:
        "var tab1 = new google.visualization.DataTable();
         tab1.addColumn("string", "a", "a");
         tab1.addColumn("number", "b", "b");
         tab1.addColumn("boolean", "c", "c");
         tab1.addRows(10);
         tab1.setCell(0, 0, "a");
         tab1.setCell(0, 1, 1, null, {"foo": "bar"});
         tab1.setCell(0, 2, true);
         ...
         tab1.setCell(9, 0, "c");
         tab1.setCell(9, 1, 3, "3$");
         tab1.setCell(9, 2, false);"

    Raises:
      DataTableException: The data does not match the type.
    """
    encoder = DataTableJSONEncoder()
    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    col_dict = dict([(col["id"], col) for col in self.__columns])
    # We first create the table with the given name
    jscode = "var %s = new google.visualization.DataTable();\n" % name
    if self.custom_properties:
        jscode += "%s.setTableProperties(%s);\n" % (
            name, encoder.encode(self.custom_properties))
    # We add the columns to the table
    for i, col in enumerate(columns_order):
        jscode += "%s.addColumn(%s, %s, %s);\n" % (
            name,
            encoder.encode(col_dict[col]["type"]),
            encoder.encode(col_dict[col]["label"]),
            encoder.encode(col_dict[col]["id"]))
        if col_dict[col]["custom_properties"]:
            jscode += "%s.setColumnProperties(%d, %s);\n" % (
                name, i, encoder.encode(col_dict[col]["custom_properties"]))
    jscode += "%s.addRows(%d);\n" % (name, len(self.__data))
    # We now go over the data and add each row
    for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
        # We add all the elements of this row by their order
        for (j, col) in enumerate(columns_order):
            # Missing/None cells simply keep the default (null) value.
            if col not in row or row[col] is None:
                continue
            value = self.CoerceValue(row[col], col_dict[col]["type"])
            if isinstance(value, tuple):
                cell_cp = ""
                if len(value) == 3:
                    # The third tuple element is the cell's custom-properties
                    # dict (CoerceValue passes it through unchanged).
                    cell_cp = ", %s" % encoder.encode(row[col][2])
                # We have a formatted value or custom property as well
                jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                           (name, i, j,
                            self.EscapeForJSCode(encoder, value[0]),
                            self.EscapeForJSCode(encoder, value[1]), cell_cp))
            else:
                jscode += "%s.setCell(%d, %d, %s);\n" % (
                    name, i, j, self.EscapeForJSCode(encoder, value))
        if cp:
            jscode += "%s.setRowProperties(%d, %s);\n" % (
                name, i, encoder.encode(cp))
    return jscode
def ToHtml(self, columns_order=None, order_by=()):
    """Renders the table as a single-line HTML <table> string.

    Args:
      columns_order: Optional. A list of every column id, in output order.
      order_by: Optional. Sort specification, passed to _PreparedData().

    Returns:
      An HTML string of the form
      '<html><body><table border="1">...</table></body></html>' with a
      <thead> row of column labels followed by one <tbody> row per data row.

    Raises:
      DataTableException: The data does not match the type.
    """
    table_template = "<html><body><table border=\"1\">%s</table></body></html>"
    columns_template = "<thead><tr>%s</tr></thead>"
    rows_template = "<tbody>%s</tbody>"
    row_template = "<tr>%s</tr>"
    header_cell_template = "<th>%s</th>"
    cell_template = "<td>%s</td>"

    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    col_dict = dict((col["id"], col) for col in self.__columns)

    header_cells = [header_cell_template % html.escape(col_dict[col]["label"])
                    for col in columns_order]
    columns_html = columns_template % "".join(header_cells)

    body_rows = []
    for row, _ in self._PreparedData(order_by):
        cells = []
        for col in columns_order:
            # An absent or None cell renders as the empty string.
            value = ""
            if col in row and row[col] is not None:
                value = self.CoerceValue(row[col], col_dict[col]["type"])
            if isinstance(value, tuple):
                # A (value, formatted, ...) tuple: display the formatted text.
                value = value[1]
            cells.append(cell_template % html.escape(self.ToString(value)))
        body_rows.append(row_template % "".join(cells))
    rows_html = rows_template % "".join(body_rows)

    return table_template % (columns_html + rows_html)
def ToCsv(self, columns_order=None, order_by=(), separator=","):
    """Renders the table as CSV text.

    Output is encoded in UTF-8 because the Python "csv" module can't handle
    Unicode properly according to its documentation.

    Args:
      columns_order: Optional. A list of every column id, in output order.
      order_by: Optional. Sort specification, passed to _PreparedData().
      separator: Optional. Delimiter placed between values (default ",").

    Returns:
      The CSV string: a header line of column labels, then one line per
      row. For (value, formatted) tuples, the formatted text is used only
      for date/datetime/timeofday columns; other columns emit the raw value.

    Raises:
      DataTableException: The data does not match the type.
    """
    output = six.StringIO()
    writer = csv.writer(output, delimiter=separator)
    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    col_dict = dict((col["id"], col) for col in self.__columns)

    def ensure_str(s):
        "Compatibility function. Ensures using of str rather than unicode."
        if isinstance(s, str):
            return s
        return s.encode("utf-8")

    writer.writerow([ensure_str(col_dict[col]["label"])
                     for col in columns_order])
    for row, _ in self._PreparedData(order_by):
        cells = []
        for col in columns_order:
            # An absent or None cell renders as the empty string.
            value = ""
            if col in row and row[col] is not None:
                value = self.CoerceValue(row[col], col_dict[col]["type"])
            if not isinstance(value, tuple):
                cells.append(ensure_str(self.ToString(value)))
            elif col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
                # Date-like columns prefer the preformatted display text.
                cells.append(ensure_str(self.ToString(value[1])))
            else:
                cells.append(ensure_str(self.ToString(value[0])))
        writer.writerow(cells)
    return output.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
    """Renders the table as UTF-16LE tab-separated text readable by MS Excel.

    Args:
      columns_order: Delegated to ToCsv.
      order_by: Delegated to ToCsv.

    Returns:
      The table as tab-separated values encoded in little-endian UTF-16.
    """
    tsv = self.ToCsv(columns_order, order_by, separator="\t")
    if isinstance(tsv, bytes):
        tsv = tsv.decode("utf-8")
    return tsv.encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
"""Returns an object suitable to be converted to JSON.
Args:
columns_order: Optional. A list of all column IDs in the order in which
you want them created in the output table. If specified,
all column IDs must be present.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData().
Returns:
A dictionary object for use by ToJSon or ToJSonResponse.
"""
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
# Creating the column JSON objects
col_objs = []
for col_id in columns_order:
col_obj = {"id": col_dict[col_id]["id"],
"label": col_dict[col_id]["label"],
"type": col_dict[col_id]["type"]}
if col_dict[col_id]["custom_properties"]:
col_obj["p"] = col_dict[col_id]["custom_properties"]
col_objs.append(col_obj)
# Creating the rows jsons
row_objs = []
for row, cp in self._PreparedData(order_by):
cell_objs = []
for col in columns_order:
value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
if value is None:
cell_obj = None
elif isinstance(value, tuple):
cell_obj = {"v": value[0]}
if len(value) > 1 and value[1] is not None:
cell_obj["f"] = value[1]
if len(value) == 3:
cell_obj["p"] = value[2]
else:
cell_obj = {"v": value}
cell_objs.append(cell_obj)
row_obj = {"c": cell_objs}
if cp:
row_obj["p"] = cp
row_objs.append(row_obj)
json_obj = {"cols": col_objs, "rows": row_objs}
if self.custom_properties:
json_obj["p"] = self.custom_properties
return json_obj
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
  """Renders the table as a complete Google Visualization JSON response.

  The result is a JS call to response_handler with a response object that
  the client-side google.visualization.Query code translates back into a
  DataTable. A URL returning this string can therefore serve as a data
  source for Google Visualization gadgets or JS code.

  Args:
    columns_order: Optional. Passed straight to self._ToJSonObj().
    order_by: Optional. Passed straight to self._ToJSonObj().
    req_id: Optional. The response id, as retrieved by the request.
    response_handler: Optional. The response handler, as retrieved by the
        request.

  Returns:
    A JSON response string of the form 'handler({...});', e.g.
    (newlines added for readability):

      google.visualization.Query.setResponse({
          'version':'0.6', 'reqId':'0', 'status':'OK',
          'table': {cols: [...], rows: [...]}});
  """
  response_obj = {
      "version": "0.6",
      "reqId": str(req_id),
      "table": self._ToJSonObj(columns_order, order_by),
      "status": "ok"
  }
  encoded = DataTableJSONEncoder().encode(response_obj)
  if not isinstance(encoded, str):
    # NOTE(review): on Py2 the encoder can hand back unicode; this forces
    # a native byte string there. On Py3 this branch is never taken.
    encoded = encoded.encode("utf-8")
  return "%s(%s);" % (response_handler, encoded)
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
  """Dispatches to the right serializer based on the 'tqx' request string.

  The tqx string (defined by the Google Visualization data source
  protocol) is a ';'-separated list of 'key:value' pairs. The 'out' key
  selects the output format — json (default, via ToJSonResponse), html,
  csv or tsv-excel — and the remaining relevant keys are forwarded to the
  chosen serializer.

  Args:
    columns_order: Optional. Passed as is to the relevant response function.
    order_by: Optional. Passed as is to the relevant response function.
    tqx: Optional. The request string as received by HTTP GET. Every key
        has a default, so an empty string yields a plain JSON response.

  Returns:
    A response string, as produced by the selected serializer.

  Raises:
    DataTableException: An unsupported protocol version or 'out' value was
        requested.
  """
  tqx_dict = {}
  if tqx:
    tqx_dict = dict(option.split(":") for option in tqx.split(";"))
  if tqx_dict.get("version", "0.6") != "0.6":
    raise DataTableException(
        "Version (%s) passed by request is not supported."
        % tqx_dict["version"])

  out_format = tqx_dict.get("out", "json")
  if out_format == "json":
    handler = tqx_dict.get("responseHandler",
                           "google.visualization.Query.setResponse")
    return self.ToJSonResponse(columns_order, order_by,
                               req_id=tqx_dict.get("reqId", 0),
                               response_handler=handler)
  if out_format == "html":
    return self.ToHtml(columns_order, order_by)
  if out_format == "csv":
    return self.ToCsv(columns_order, order_by)
  if out_format == "tsv-excel":
    return self.ToTsvExcel(columns_order, order_by)
  raise DataTableException(
      "'out' parameter: '%s' is not supported" % out_format)
|
google/google-visualization-python
|
gviz_api.py
|
DataTable.ToJSonResponse
|
python
|
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
response_handler="google.visualization.Query.setResponse"):
response_obj = {
"version": "0.6",
"reqId": str(req_id),
"table": self._ToJSonObj(columns_order, order_by),
"status": "ok"
}
encoded_response_str = DataTableJSONEncoder().encode(response_obj)
if not isinstance(encoded_response_str, str):
encoded_response_str = encoded_response_str.encode("utf-8")
return "%s(%s);" % (response_handler, encoded_response_str)
|
Writes a table as a JSON response that can be returned as-is to a client.
This method writes a JSON response to return to a client in response to a
Google Visualization API query. This string can be processed by the calling
page, and is used to deliver a data table to a visualization hosted on
a different page.
Args:
columns_order: Optional. Passed straight to self.ToJSon().
order_by: Optional. Passed straight to self.ToJSon().
req_id: Optional. The response id, as retrieved by the request.
response_handler: Optional. The response handler, as retrieved by the
request.
Returns:
A JSON response string to be received by JS the visualization Query
object. This response would be translated into a DataTable on the
client side.
Example result (newlines added for readability):
google.visualization.Query.setResponse({
'version':'0.6', 'reqId':'0', 'status':'OK',
'table': {cols: [...], rows: [...]}});
Note: The URL returning this string can be used as a data source by Google
Visualization Gadgets or from JS code.
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L1011-L1049
|
[
"def _ToJSonObj(self, columns_order=None, order_by=()):\n \"\"\"Returns an object suitable to be converted to JSON.\n\n Args:\n columns_order: Optional. A list of all column IDs in the order in which\n you want them created in the output table. If specified,\n all column IDs must be present.\n order_by: Optional. Specifies the name of the column(s) to sort by.\n Passed as is to _PreparedData().\n\n Returns:\n A dictionary object for use by ToJSon or ToJSonResponse.\n \"\"\"\n if columns_order is None:\n columns_order = [col[\"id\"] for col in self.__columns]\n col_dict = dict([(col[\"id\"], col) for col in self.__columns])\n\n # Creating the column JSON objects\n col_objs = []\n for col_id in columns_order:\n col_obj = {\"id\": col_dict[col_id][\"id\"],\n \"label\": col_dict[col_id][\"label\"],\n \"type\": col_dict[col_id][\"type\"]}\n if col_dict[col_id][\"custom_properties\"]:\n col_obj[\"p\"] = col_dict[col_id][\"custom_properties\"]\n col_objs.append(col_obj)\n\n # Creating the rows jsons\n row_objs = []\n for row, cp in self._PreparedData(order_by):\n cell_objs = []\n for col in columns_order:\n value = self.CoerceValue(row.get(col, None), col_dict[col][\"type\"])\n if value is None:\n cell_obj = None\n elif isinstance(value, tuple):\n cell_obj = {\"v\": value[0]}\n if len(value) > 1 and value[1] is not None:\n cell_obj[\"f\"] = value[1]\n if len(value) == 3:\n cell_obj[\"p\"] = value[2]\n else:\n cell_obj = {\"v\": value}\n cell_objs.append(cell_obj)\n row_obj = {\"c\": cell_objs}\n if cp:\n row_obj[\"p\"] = cp\n row_objs.append(row_obj)\n\n json_obj = {\"cols\": col_objs, \"rows\": row_objs}\n if self.custom_properties:\n json_obj[\"p\"] = self.custom_properties\n\n return json_obj\n"
] |
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
  """Builds the table schema and optionally loads initial rows.

  See the class documentation for more information on table schema and
  data values.

  Args:
    table_description: A table schema in any of the formats accepted by
        TableDescriptionParser(): column names, data types and labels.
    data: Optional. Initial rows, in a structure consistent with
        table_description. More rows can be added later with AppendData().
    custom_properties: Optional. A string-to-string dictionary stored as
        the table's custom properties; it can be replaced later by
        assigning to self.custom_properties.

  Raises:
    DataTableException: The data and the description did not match, or an
        unsupported format was used.
  """
  self.__columns = self.TableDescriptionParser(table_description)
  self.__data = []
  self.custom_properties = {} if custom_properties is None else custom_properties
  if data:
    self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
  """Coerces a single value into the type expected for its column.

  Internal helper method.

  Args:
    value: The value which should be converted
    value_type: One of "string", "number", "boolean", "date", "datetime" or
                "timeofday".

  Returns:
    An item of the Python type appropriate to the given value_type. Strings
    are also converted to Unicode using UTF-8 encoding if necessary.

    If a tuple is given, it should be in one of the following forms:
      - (value, formatted value)
      - (value, formatted value, custom properties)
    where the formatted value is a string, and custom properties is a
    dictionary of the custom properties for this cell.
    To specify custom properties without specifying formatted value, one can
    pass None as the formatted value.
    One can also have a null-valued cell with formatted value and/or custom
    properties by specifying None for the value.
    This method ignores the custom properties except for checking that it is a
    dictionary. The custom properties are handled in the ToJSon and ToJSCode
    methods.

    The real type of the given value is not strictly checked. For example,
    any type can be used for string - as we simply take its str( ) and for
    boolean value we just check "if value".

  Examples:
    CoerceValue(None, "string") returns None
    CoerceValue((5, "5$"), "number") returns (5, "5$")
    CoerceValue(100, "string") returns "100"
    CoerceValue(0, "boolean") returns False

  Raises:
    DataTableException: The value and type did not match in a not-recoverable
                        way, for example given value 'abc' for type 'number'.
  """
  if isinstance(value, tuple):
    # In case of a tuple, we run the same function on the value itself and
    # add the formatted value (and optional custom-properties dict) back.
    if (len(value) not in [2, 3] or
        (len(value) == 3 and not isinstance(value[2], dict))):
      raise DataTableException("Wrong format for value and formatting - %s." %
                               str(value))
    if not isinstance(value[1], six.string_types + (type(None),)):
      raise DataTableException("Formatted value is not string, given %s." %
                               type(value[1]))
    js_value = DataTable.CoerceValue(value[0], value_type)
    return (js_value,) + value[1:]

  t_value = type(value)
  if value is None:
    # None is a valid "null" cell for every column type.
    return value
  if value_type == "boolean":
    return bool(value)

  elif value_type == "number":
    if isinstance(value, six.integer_types + (float,)):
      return value
    raise DataTableException("Wrong type %s when expected number" % t_value)

  elif value_type == "string":
    if isinstance(value, six.text_type):
      return value
    if isinstance(value, bytes):
      # Byte strings are assumed to be UTF-8 encoded text.
      return six.text_type(value, encoding="utf-8")
    else:
      return six.text_type(value)

  elif value_type == "date":
    if isinstance(value, datetime.datetime):
      # A full datetime is truncated down to its date part.
      return datetime.date(value.year, value.month, value.day)
    elif isinstance(value, datetime.date):
      return value
    else:
      raise DataTableException("Wrong type %s when expected date" % t_value)

  elif value_type == "timeofday":
    if isinstance(value, datetime.datetime):
      # A full datetime is truncated down to its time part.
      return datetime.time(value.hour, value.minute, value.second)
    elif isinstance(value, datetime.time):
      return value
    else:
      raise DataTableException("Wrong type %s when expected time" % t_value)

  elif value_type == "datetime":
    if isinstance(value, datetime.datetime):
      return value
    else:
      raise DataTableException("Wrong type %s when expected datetime" %
                               t_value)
  # If we got here, it means the given value_type was not one of the
  # supported types.
  raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
if value is None:
return "(empty)"
elif isinstance(value, (datetime.datetime,
datetime.date,
datetime.time)):
return str(value)
elif isinstance(value, six.text_type):
return value
elif isinstance(value, bool):
return str(value).lower()
elif isinstance(value, bytes):
return six.text_type(value, encoding="utf-8")
else:
return six.text_type(value)
@staticmethod
def ColumnTypeParser(description):
  """Parses one column description into its canonical dictionary form.

  Accepted forms:
    'id'
    ('id',)
    ('id', 'type')
    ('id', 'type', 'label')
    ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})

  Returns:
    A dict with keys id, label (defaults to the id), type (defaults to
    "string") and custom_properties (defaults to an empty dict).

  Raises:
    DataTableException: The description is malformed or names an
        unsupported type.
  """
  if not description:
    raise DataTableException("Description error: empty description given")

  if not isinstance(description, (six.string_types, tuple)):
    raise DataTableException("Description error: expected either string or "
                             "tuple, got %s." % type(description))

  if isinstance(description, six.string_types):
    description = (description,)

  # id, type and label (the first three slots) must all be strings.
  for element in description[:3]:
    if not isinstance(element, six.string_types):
      raise DataTableException("Description error: expected tuple of "
                               "strings, current element of type %s." %
                               type(element))

  parsed = {"id": description[0],
            "label": description[0],
            "type": "string",
            "custom_properties": {}}
  if len(description) > 1:
    parsed["type"] = description[1].lower()
  if len(description) > 2:
    parsed["label"] = description[2]
  if len(description) > 3:
    if not isinstance(description[3], dict):
      raise DataTableException("Description error: expected custom "
                               "properties of type dict, current element "
                               "of type %s." % type(description[3]))
    parsed["custom_properties"] = description[3]
  if len(description) > 4:
    raise DataTableException("Description error: tuple of length > 4")

  if parsed["type"] not in ["string", "number", "boolean",
                            "date", "datetime", "timeofday"]:
    raise DataTableException(
        "Description error: unsupported type '%s'" % parsed["type"])
  return parsed
@staticmethod
def TableDescriptionParser(table_description, depth=0):
  """Parses the table_description object for internal use.

  Parses the user-submitted table description into an internal format used
  by the Python DataTable class. Returns the flat list of parsed columns.

  Args:
    table_description: A description of the table which should comply
                       with one of the formats described below.
    depth: Optional. The depth of the first level in the current description.
           Used by recursive calls to this function.

  Returns:
    List of columns, where each column represented by a dictionary with the
    keys: id, label, type, depth, container which means the following:
    - id: the id of the column
    - name: The name of the column
    - type: The datatype of the elements in this column. Allowed types are
            described in ColumnTypeParser().
    - depth: The depth of this column in the table description
    - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
    - custom_properties: The custom properties for this column.
    The returned description is flattened regardless of how it was given.

  Raises:
    DataTableException: Error in a column description or in the description
                        structure.

  Examples:
    A column description can be of the following forms:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
    or as a dictionary:
     'id': 'type'
     'id': ('type',)
     'id': ('type', 'label')
     'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
    If the type is not specified, we treat it as string.
    If no specific label is given, the label is simply the id.
    If no custom properties are given, we use an empty dictionary.

    input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
    output: [{'id': 'a', 'label': 'a', 'type': 'date',
              'depth': 0, 'container': 'iter', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'timeofday',
              'depth': 0, 'container': 'iter',
              'custom_properties': {'foo': 'bar'}}]

    input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
    output: [{'id': 'a', 'label': 'a', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'iter', 'custom_properties': {}},
             {'id': 'c', 'label': 'column c', 'type': 'string',
              'depth': 1, 'container': 'iter', 'custom_properties': {}}]

    input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'c', 'type': 'string',
              'depth': 1, 'container': 'dict', 'custom_properties': {}}]

    input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
    output: [{'id': 'w', 'label': 'word', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'count', 'type': 'number',
              'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

    input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
             'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
             'container': 'dict', 'custom_properties': {}}

    NOTE: there might be ambiguity in the case of a dictionary representation
    of a single column. For example, the following description can be parsed
    in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
    with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
    'a', and the other named 'b' of type 'c'. We choose the first option by
    default, and in case the second option is the right one, it is possible to
    make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
    into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
    -- second 'b' is the label, and {} is the custom properties field.
  """
  # For the recursion step, we check for a scalar object (string or tuple)
  if isinstance(table_description, (six.string_types, tuple)):
    parsed_col = DataTable.ColumnTypeParser(table_description)
    parsed_col["depth"] = depth
    parsed_col["container"] = "scalar"
    return [parsed_col]

  # Since it is not scalar, table_description must be iterable.
  if not hasattr(table_description, "__iter__"):
    raise DataTableException("Expected an iterable object, got %s" %
                             type(table_description))
  if not isinstance(table_description, dict):
    # We expects a non-dictionary iterable item. Each element is a sibling
    # column at the same depth.
    columns = []
    for desc in table_description:
      parsed_col = DataTable.ColumnTypeParser(desc)
      parsed_col["depth"] = depth
      parsed_col["container"] = "iter"
      columns.append(parsed_col)
    if not columns:
      raise DataTableException("Description iterable objects should not"
                               " be empty.")
    return columns
  # The other case is a dictionary
  if not table_description:
    raise DataTableException("Empty dictionaries are not allowed inside"
                             " description")

  # To differentiate between the two cases of more levels below or this is
  # the most inner dictionary, we consider the number of keys (more then one
  # key is indication for most inner dictionary) and the type of the key and
  # value in case of only 1 key (if the type of key is string and the type of
  # the value is a tuple of 0-3 items, we assume this is the most inner
  # dictionary).
  # NOTE: this way of differentiating might create ambiguity. See docs.
  if (len(table_description) != 1 or
      (isinstance(next(six.iterkeys(table_description)), six.string_types) and
       isinstance(next(six.itervalues(table_description)), tuple) and
       len(next(six.itervalues(table_description))) < 4)):
    # This is the most inner dictionary. Parsing types.
    columns = []
    # We sort the items, equivalent to sort the keys since they are unique
    for key, value in sorted(table_description.items()):
      # We parse the column type as (key, type) or (key, type, label) using
      # ColumnTypeParser.
      if isinstance(value, tuple):
        parsed_col = DataTable.ColumnTypeParser((key,) + value)
      else:
        parsed_col = DataTable.ColumnTypeParser((key, value))
      parsed_col["depth"] = depth
      parsed_col["container"] = "dict"
      columns.append(parsed_col)
    return columns
  # This is an outer dictionary, must have at most one key. Its single value
  # describes the next (deeper) level, parsed by the recursive call.
  parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
  parsed_col["depth"] = depth
  parsed_col["container"] = "dict"
  return ([parsed_col] + DataTable.TableDescriptionParser(
      sorted(table_description.values())[0], depth=depth + 1))
@property
def columns(self):
  """The parsed column descriptions, as produced by TableDescriptionParser."""
  return self.__columns
def NumberOfRows(self):
"""Returns the number of rows in the current data stored in the table."""
return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
"""Sets the custom properties for given row(s).
Can accept a single row or an iterable of rows.
Sets the given custom properties for all specified rows.
Args:
rows: The row, or rows, to set the custom properties for.
custom_properties: A string to string dictionary of custom properties to
set for all rows.
"""
if not hasattr(rows, "__iter__"):
rows = [rows]
for row in rows:
self.__data[row] = (self.__data[row][0], custom_properties)
def LoadData(self, data, custom_properties=None):
"""Loads new rows to the data table, clearing existing rows.
May also set the custom_properties for the added rows. The given custom
properties dictionary specifies the dictionary that will be used for *all*
given rows.
Args:
data: The rows that the table will contain.
custom_properties: A dictionary of string to string to set as the custom
properties for all rows.
"""
self.__data = []
self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
"""Appends new data to the table.
Data is appended in rows. Data must comply with
the table schema passed in to __init__(). See CoerceValue() for a list
of acceptable data types. See the class documentation for more information
and examples of schema and data values.
Args:
data: The row to add to the table. The data must conform to the table
description format.
custom_properties: A dictionary of string to string, representing the
custom properties to add to all the rows.
Raises:
DataTableException: The data structure does not match the description.
"""
# If the maximal depth is 0, we simply iterate over the data table
# lines and insert them using _InnerAppendData. Otherwise, we simply
# let the _InnerAppendData handle all the levels.
if not self.__columns[-1]["depth"]:
for row in data:
self._InnerAppendData(({}, custom_properties), row, 0)
else:
self._InnerAppendData(({}, custom_properties), data, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
  """Inner function to assist LoadData.

  Recursively walks 'data' against the column descriptions starting at
  col_index, accumulating cell values in prev_col_values — a tuple of
  (partial row dict, custom properties) — and appending completed rows
  to self.__data.
  """
  # We first check that col_index has not exceeded the columns size
  if col_index >= len(self.__columns):
    raise DataTableException("The data does not match description, too deep")

  # Dealing with the scalar case, the data is the last value.
  if self.__columns[col_index]["container"] == "scalar":
    prev_col_values[0][self.__columns[col_index]["id"]] = data
    self.__data.append(prev_col_values)
    return

  if self.__columns[col_index]["container"] == "iter":
    # dicts also have __iter__, so they are excluded explicitly here.
    if not hasattr(data, "__iter__") or isinstance(data, dict):
      raise DataTableException("Expected iterable object, got %s" %
                               type(data))
    # We only need to insert the rest of the columns
    # If there are less items than expected, we only add what there is.
    for value in data:
      if col_index >= len(self.__columns):
        raise DataTableException("Too many elements given in data")
      prev_col_values[0][self.__columns[col_index]["id"]] = value
      col_index += 1
    self.__data.append(prev_col_values)
    return

  # We know the current level is a dictionary, we verify the type.
  if not isinstance(data, dict):
    raise DataTableException("Expected dictionary at current level, got %s" %
                             type(data))
  # We check if this is the last level
  if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
    # We need to add the keys in the dictionary as they are; keys missing
    # from 'data' are simply left out of the row.
    for col in self.__columns[col_index:]:
      if col["id"] in data:
        prev_col_values[0][col["id"]] = data[col["id"]]
    self.__data.append(prev_col_values)
    return

  # We have a dictionary in an inner depth level.
  if not data.keys():
    # In case this is an empty dictionary, we add a record with the columns
    # filled only until this point.
    self.__data.append(prev_col_values)
  else:
    for key in sorted(data):
      # Each key becomes this column's value; a *copy* of the accumulated
      # cells is taken so sibling keys do not share mutations.
      col_values = dict(prev_col_values[0])
      col_values[self.__columns[col_index]["id"]] = key
      self._InnerAppendData((col_values, prev_col_values[1]),
                            data[key], col_index + 1)
def _PreparedData(self, order_by=()):
"""Prepares the data for enumeration - sorting it by order_by.
Args:
order_by: Optional. Specifies the name of the column(s) to sort by, and
(optionally) which direction to sort in. Default sort direction
is asc. Following formats are accepted:
"string_col_name" -- For a single key in default (asc) order.
("string_col_name", "asc|desc") -- For a single key.
[("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
one column, an array of tuples of (col_name, "asc|desc").
Returns:
The data sorted by the keys given.
Raises:
DataTableException: Sort direction not in 'asc' or 'desc'
"""
if not order_by:
return self.__data
sorted_data = self.__data[:]
if isinstance(order_by, six.string_types) or (
isinstance(order_by, tuple) and len(order_by) == 2 and
order_by[1].lower() in ["asc", "desc"]):
order_by = (order_by,)
for key in reversed(order_by):
if isinstance(key, six.string_types):
sorted_data.sort(key=lambda x: x[0].get(key))
elif (isinstance(key, (list, tuple)) and len(key) == 2 and
key[1].lower() in ("asc", "desc")):
key_func = lambda x: x[0].get(key[0])
sorted_data.sort(key=key_func, reverse=key[1].lower() != "asc")
else:
raise DataTableException("Expected tuple with second value: "
"'asc' or 'desc'")
return sorted_data
def ToJSCode(self, name, columns_order=None, order_by=()):
  """Writes the data table as a JS code string.

  This method writes a string of JS code that can be run to
  generate a DataTable with the specified data. Typically used for debugging
  only.

  Args:
    name: The name of the table. The name would be used as the DataTable's
          variable name in the created JS code.
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the order
                   in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.

  Returns:
    A string of JS code that, when run, generates a DataTable with the given
    name and the data stored in the DataTable object.

    Example result:
      "var tab1 = new google.visualization.DataTable();
       tab1.addColumn("string", "a", "a");
       tab1.addColumn("number", "b", "b");
       tab1.addColumn("boolean", "c", "c");
       tab1.addRows(10);
       tab1.setCell(0, 0, "a");
       tab1.setCell(0, 1, 1, null, {"foo": "bar"});
       tab1.setCell(0, 2, true);
       ...
       tab1.setCell(9, 0, "c");
       tab1.setCell(9, 1, 3, "3$");
       tab1.setCell(9, 2, false);"

  Raises:
    DataTableException: The data does not match the type.
  """
  encoder = DataTableJSONEncoder()

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # We first create the table with the given name
  jscode = "var %s = new google.visualization.DataTable();\n" % name
  if self.custom_properties:
    jscode += "%s.setTableProperties(%s);\n" % (
        name, encoder.encode(self.custom_properties))

  # We add the columns to the table
  for i, col in enumerate(columns_order):
    jscode += "%s.addColumn(%s, %s, %s);\n" % (
        name,
        encoder.encode(col_dict[col]["type"]),
        encoder.encode(col_dict[col]["label"]),
        encoder.encode(col_dict[col]["id"]))
    if col_dict[col]["custom_properties"]:
      jscode += "%s.setColumnProperties(%d, %s);\n" % (
          name, i, encoder.encode(col_dict[col]["custom_properties"]))
  jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

  # We now go over the data and add each row
  for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
    # We add all the elements of this row by their order
    for (j, col) in enumerate(columns_order):
      # None values (and missing cells) are simply skipped; the JS
      # DataTable leaves unset cells null.
      if col not in row or row[col] is None:
        continue
      value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        cell_cp = ""
        if len(value) == 3:
          # NOTE: the custom properties are read from the raw row value
          # (row[col][2]) rather than from the coerced tuple.
          cell_cp = ", %s" % encoder.encode(row[col][2])
        # We have a formatted value or custom property as well
        jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                   (name, i, j,
                    self.EscapeForJSCode(encoder, value[0]),
                    self.EscapeForJSCode(encoder, value[1]), cell_cp))
      else:
        jscode += "%s.setCell(%d, %d, %s);\n" % (
            name, i, j, self.EscapeForJSCode(encoder, value))
    if cp:
      jscode += "%s.setRowProperties(%d, %s);\n" % (
          name, i, encoder.encode(cp))
  return jscode
def ToHtml(self, columns_order=None, order_by=()):
  """Renders the table as a single HTML table string.

  Args:
    columns_order: Optional. A list of all column IDs in the order they
        should appear in the output. If given, every column ID must be
        listed.
    order_by: Optional. Name of the column(s) to sort by; forwarded
        unchanged to _PreparedData.

  Returns:
    A string of the form
    '<html><body><table border="1">...</table></body></html>' with one
    <th> per column label and one <tr> per data row, all cell text
    HTML-escaped.

  Raises:
    DataTableException: The data does not match the type.
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Header: one escaped <th> per column label, in the requested order.
  header_cells = "".join(
      "<th>%s</th>" % html.escape(col_dict[col_id]["label"])
      for col_id in columns_order)
  head_html = "<thead><tr>%s</tr></thead>" % header_cells

  body_rows = []
  for row, _ in self._PreparedData(order_by):
    cells = []
    for col_id in columns_order:
      # Missing or None cells render as the empty string.
      cell_value = ""
      if row.get(col_id) is not None:
        cell_value = self.CoerceValue(row[col_id], col_dict[col_id]["type"])
      if isinstance(cell_value, tuple):
        # (value, formatted[, props]) tuple: display the formatted value.
        text = self.ToString(cell_value[1])
      else:
        text = self.ToString(cell_value)
      cells.append("<td>%s</td>" % html.escape(text))
    body_rows.append("<tr>%s</tr>" % "".join(cells))
  body_html = "<tbody>%s</tbody>" % "".join(body_rows)

  return ("<html><body><table border=\"1\">%s</table></body></html>"
          % (head_html + body_html))
def ToCsv(self, columns_order=None, order_by=(), separator=","):
  """Renders the table as a CSV string.

  On Python 2 the output cells are UTF-8 encoded byte strings, because the
  "csv" module there cannot handle Unicode; on Python 3 plain str is used.

  Args:
    columns_order: Optional. A list of all column IDs in the desired output
        order. If given, every column ID must be listed.
    order_by: Optional. Name of the column(s) to sort by; forwarded
        unchanged to _PreparedData.
    separator: Optional. Field delimiter, "," by default.

  Returns:
    A CSV string: one header row of labels followed by one row per data
    entry.

  Raises:
    DataTableException: The data does not match the type.
  """
  output = six.StringIO()
  csv_writer = csv.writer(output, delimiter=separator)
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  def _as_str(s):
    "Py2/3 compatibility: encode unicode to UTF-8 bytes on Python 2 only."
    return s if isinstance(s, str) else s.encode("utf-8")

  # Header row: column labels in the requested order.
  csv_writer.writerow([_as_str(col_dict[col_id]["label"])
                       for col_id in columns_order])

  for row, _ in self._PreparedData(order_by):
    fields = []
    for col_id in columns_order:
      # Missing or None cells are written as the empty string.
      value = ""
      if row.get(col_id) is not None:
        value = self.CoerceValue(row[col_id], col_dict[col_id]["type"])
      if isinstance(value, tuple):
        # Formatted values are emitted only for the date/time family;
        # other types fall back to the raw value.
        if col_dict[col_id]["type"] in ("date", "datetime", "timeofday"):
          fields.append(_as_str(self.ToString(value[1])))
        else:
          fields.append(_as_str(self.ToString(value[0])))
      else:
        fields.append(_as_str(self.ToString(value)))
    csv_writer.writerow(fields)
  return output.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
  """Renders the table as tab-separated UTF-16LE bytes, readable by Excel.

  Args:
    columns_order: Delegated to ToCsv.
    order_by: Delegated to ToCsv.

  Returns:
    Little-endian UTF-16 encoded bytes of the tab-separated table.
  """
  raw = self.ToCsv(columns_order, order_by, separator="\t")
  # ToCsv yields UTF-8 bytes on Python 2; normalize to text first.
  text = raw if isinstance(raw, six.text_type) else raw.decode("utf-8")
  return text.encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
  """Builds the plain-dict representation serialized by ToJSon/ToJSonResponse.

  Args:
    columns_order: Optional. A list of all column IDs in the desired output
        order. If given, every column ID must be listed.
    order_by: Optional. Name of the column(s) to sort by; forwarded
        unchanged to _PreparedData().

  Returns:
    A dict with "cols" (column descriptors), "rows" (cell objects with
    "v"/"f"/"p" keys) and, when table-level custom properties exist, "p".
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Column descriptors, in output order.
  cols = []
  for col_id in columns_order:
    desc = col_dict[col_id]
    entry = {"id": desc["id"],
             "label": desc["label"],
             "type": desc["type"]}
    if desc["custom_properties"]:
      entry["p"] = desc["custom_properties"]
    cols.append(entry)

  # Row objects; each cell is None, or a dict with value ("v"),
  # optional formatted value ("f") and optional custom properties ("p").
  rows = []
  for row, row_cp in self._PreparedData(order_by):
    cells = []
    for col_id in columns_order:
      value = self.CoerceValue(row.get(col_id, None), col_dict[col_id]["type"])
      if value is None:
        cells.append(None)
      elif isinstance(value, tuple):
        cell = {"v": value[0]}
        if len(value) > 1 and value[1] is not None:
          cell["f"] = value[1]
        if len(value) == 3:
          cell["p"] = value[2]
        cells.append(cell)
      else:
        cells.append({"v": value})
    row_obj = {"c": cells}
    if row_cp:
      row_obj["p"] = row_cp
    rows.append(row_obj)

  result = {"cols": cols, "rows": rows}
  if self.custom_properties:
    result["p"] = self.custom_properties
  return result
def ToJSon(self, columns_order=None, order_by=()):
  """Serializes the table to a JSON string for the JS DataTable constructor.

  The result can be passed directly to google.visualization.DataTable, e.g.:

    var data = new google.visualization.DataTable(<this string>, 0.6);

  Args:
    columns_order: Optional. A list of all column IDs in the desired output
        order. If given, every column ID must be listed.
    order_by: Optional. Name of the column(s) to sort by; forwarded
        unchanged to _PreparedData().

  Returns:
    A JSON string describing the columns, rows and custom properties of
    this table (UTF-8 bytes on Python 2 when the encoder yields unicode).

  Raises:
    DataTableException: The data does not match the type.
  """
  json_str = DataTableJSONEncoder().encode(
      self._ToJSonObj(columns_order, order_by))
  if isinstance(json_str, str):
    return json_str
  # Python 2: the encoder may produce unicode; callers expect UTF-8 bytes.
  return json_str.encode("utf-8")
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
  """Writes the right response according to the request string passed in tqx.

  Parses the tqx request string (format defined by the Google Visualization
  data-source protocol), validates the protocol version, then dispatches on
  the "out" parameter: "json" -> ToJSonResponse(), "html" -> ToHtml(),
  "csv" -> ToCsv(), "tsv-excel" -> ToTsvExcel(), forwarding the remaining
  relevant request keys.

  Args:
    columns_order: Optional. Passed as is to the relevant response function.
    order_by: Optional. Passed as is to the relevant response function.
    tqx: Optional. The request string as received by HTTP GET, in the
        format "key1:value1;key2:value2...". Every key has a default, so an
        empty string simply produces a plain ToJSonResponse().

  Returns:
    A response string, as returned by the relevant response function.

  Raises:
    DataTableException: One of the parameters passed in tqx is not
        supported.
  """
  tqx_dict = {}
  if tqx:
    # Split each option on the first ":" only, so values (e.g. a
    # responseHandler) may themselves contain colons; previously such
    # inputs raised ValueError from the dict() constructor.
    tqx_dict = dict(opt.split(":", 1) for opt in tqx.split(";"))
  if tqx_dict.get("version", "0.6") != "0.6":
    raise DataTableException(
        "Version (%s) passed by request is not supported."
        % tqx_dict["version"])
  out = tqx_dict.get("out", "json")
  if out == "json":
    response_handler = tqx_dict.get("responseHandler",
                                    "google.visualization.Query.setResponse")
    return self.ToJSonResponse(columns_order, order_by,
                               req_id=tqx_dict.get("reqId", 0),
                               response_handler=response_handler)
  elif out == "html":
    return self.ToHtml(columns_order, order_by)
  elif out == "csv":
    return self.ToCsv(columns_order, order_by)
  elif out == "tsv-excel":
    return self.ToTsvExcel(columns_order, order_by)
  raise DataTableException(
      "'out' parameter: '%s' is not supported" % out)
|
google/google-visualization-python
|
gviz_api.py
|
DataTable.ToResponse
|
python
|
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
tqx_dict = {}
if tqx:
tqx_dict = dict(opt.split(":") for opt in tqx.split(";"))
if tqx_dict.get("version", "0.6") != "0.6":
raise DataTableException(
"Version (%s) passed by request is not supported."
% tqx_dict["version"])
if tqx_dict.get("out", "json") == "json":
response_handler = tqx_dict.get("responseHandler",
"google.visualization.Query.setResponse")
return self.ToJSonResponse(columns_order, order_by,
req_id=tqx_dict.get("reqId", 0),
response_handler=response_handler)
elif tqx_dict["out"] == "html":
return self.ToHtml(columns_order, order_by)
elif tqx_dict["out"] == "csv":
return self.ToCsv(columns_order, order_by)
elif tqx_dict["out"] == "tsv-excel":
return self.ToTsvExcel(columns_order, order_by)
else:
raise DataTableException(
"'out' parameter: '%s' is not supported" % tqx_dict["out"])
|
Writes the right response according to the request string passed in tqx.
This method parses the tqx request string (format of which is defined in
the documentation for implementing a data source of Google Visualization),
and returns the right response according to the request.
It parses out the "out" parameter of tqx, calls the relevant response
(ToJSonResponse() for "json", ToCsv() for "csv", ToHtml() for "html",
ToTsvExcel() for "tsv-excel") and passes the response function the rest of
the relevant request keys.
Args:
columns_order: Optional. Passed as is to the relevant response function.
order_by: Optional. Passed as is to the relevant response function.
tqx: Optional. The request string as received by HTTP GET. Should be in
the format "key1:value1;key2:value2...". All keys have a default
value, so an empty string will just do the default (which is calling
ToJSonResponse() with no extra parameters).
Returns:
A response string, as returned by the relevant response function.
Raises:
DataTableException: One of the parameters passed in tqx is not supported.
|
train
|
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L1051-L1098
| null |
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
  """Builds a DataTable from a schema and (optionally) initial data.

  Args:
    table_description: Table schema in any of the formats accepted by
        TableDescriptionParser(); it fixes the column IDs, types and
        labels for the lifetime of the table.
    data: Optional. Initial rows consistent with the schema. More rows
        can be added later with AppendData().
    custom_properties: Optional. A string-to-string dict stored as the
        table's custom properties; may also be changed later through
        self.custom_properties.

  Raises:
    DataTableException: The description or the data is malformed or uses
        an unsupported format.
  """
  self.__columns = self.TableDescriptionParser(table_description)
  self.__data = []
  self.custom_properties = (
      custom_properties if custom_properties is not None else {})
  if data:
    self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
  """Coerces a single value into the type expected for its column.

  Internal helper method.

  Args:
    value: The value which should be converted.
    value_type: One of "string", "number", "boolean", "date", "datetime" or
        "timeofday".

  Returns:
    An item of the Python type appropriate to the given value_type. Strings
    are also converted to Unicode using UTF-8 encoding if necessary.

    If a tuple is given, it should be in one of the following forms:
      - (value, formatted value)
      - (value, formatted value, custom properties)
    where the formatted value is a string, and custom properties is a
    dictionary of the custom properties for this cell.
    To specify custom properties without specifying formatted value, one can
    pass None as the formatted value.
    One can also have a null-valued cell with formatted value and/or custom
    properties by specifying None for the value.
    This method ignores the custom properties except for checking that it is
    a dictionary. The custom properties are handled in the ToJSon and
    ToJSCode methods.

    The real type of the given value is not strictly checked. For example,
    any type can be used for string - we simply take its str() - and for
    boolean values we just check "if value".

  Examples:
    CoerceValue(None, "string") returns None
    CoerceValue((5, "5$"), "number") returns (5, "5$")
    CoerceValue(100, "string") returns "100"
    CoerceValue(0, "boolean") returns False

  Raises:
    DataTableException: The value and type did not match in a
        not-recoverable way, for example given value 'abc' for type
        'number'.
  """
  if isinstance(value, tuple):
    # In case of a tuple, we run the same function on the value itself and
    # re-attach the formatted value / custom properties afterwards.
    if (len(value) not in [2, 3] or
        (len(value) == 3 and not isinstance(value[2], dict))):
      raise DataTableException("Wrong format for value and formatting - %s." %
                               str(value))
    if not isinstance(value[1], six.string_types + (type(None),)):
      raise DataTableException("Formatted value is not string, given %s." %
                               type(value[1]))
    js_value = DataTable.CoerceValue(value[0], value_type)
    return (js_value,) + value[1:]

  t_value = type(value)
  # None is a valid (null) cell for every column type.
  if value is None:
    return value
  if value_type == "boolean":
    # Plain truthiness: any truthy value maps to True.
    return bool(value)

  elif value_type == "number":
    if isinstance(value, six.integer_types + (float,)):
      return value
    raise DataTableException("Wrong type %s when expected number" % t_value)

  elif value_type == "string":
    if isinstance(value, six.text_type):
      return value
    if isinstance(value, bytes):
      # Byte strings are assumed to be UTF-8 encoded text.
      return six.text_type(value, encoding="utf-8")
    else:
      return six.text_type(value)

  elif value_type == "date":
    # Check datetime before date: datetime is a subclass of date, and here
    # the time part must be truncated away.
    if isinstance(value, datetime.datetime):
      return datetime.date(value.year, value.month, value.day)
    elif isinstance(value, datetime.date):
      return value
    else:
      raise DataTableException("Wrong type %s when expected date" % t_value)

  elif value_type == "timeofday":
    # A full datetime is reduced to its time-of-day component.
    if isinstance(value, datetime.datetime):
      return datetime.time(value.hour, value.minute, value.second)
    elif isinstance(value, datetime.time):
      return value
    else:
      raise DataTableException("Wrong type %s when expected time" % t_value)

  elif value_type == "datetime":
    if isinstance(value, datetime.datetime):
      return value
    else:
      raise DataTableException("Wrong type %s when expected datetime" %
                               t_value)
  # If we got here, it means the given value_type was not one of the
  # supported types.
  raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
  """Returns the display text for a single cell value.

  None renders as "(empty)", date/time values via str(), booleans as
  lowercase "true"/"false", byte strings are decoded as UTF-8, and
  anything else goes through the text constructor.
  """
  if value is None:
    return "(empty)"
  if isinstance(value, (datetime.datetime, datetime.date, datetime.time)):
    return str(value)
  if isinstance(value, six.text_type):
    return value
  if isinstance(value, bool):
    return str(value).lower()
  if isinstance(value, bytes):
    return six.text_type(value, encoding="utf-8")
  return six.text_type(value)
@staticmethod
def ColumnTypeParser(description):
  """Parses one column description into its canonical dict form.

  Internal helper method.

  Args:
    description: A column description in one of the forms:
        'id'
        ('id',)
        ('id', 'type')
        ('id', 'type', 'label')
        ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})

  Returns:
    A dict with keys id, label, type and custom_properties, where the
    label defaults to the id, the type to "string", and the custom
    properties to an empty dict.

  Raises:
    DataTableException: The description is empty, malformed, or names an
        unsupported type.
  """
  if not description:
    raise DataTableException("Description error: empty description given")

  if not isinstance(description, (six.string_types, tuple)):
    raise DataTableException("Description error: expected either string or "
                             "tuple, got %s." % type(description))

  # Normalize the bare-string form to a one-element tuple.
  if isinstance(description, six.string_types):
    description = (description,)
  # The first three slots (id, type, label) must all be strings.
  for elem in description[:3]:
    if not isinstance(elem, six.string_types):
      raise DataTableException("Description error: expected tuple of "
                               "strings, current element of type %s." %
                               type(elem))
  parsed = {"id": description[0],
            "label": description[0],
            "type": "string",
            "custom_properties": {}}
  if len(description) > 1:
    parsed["type"] = description[1].lower()
  if len(description) > 2:
    parsed["label"] = description[2]
  if len(description) > 3:
    if not isinstance(description[3], dict):
      raise DataTableException("Description error: expected custom "
                               "properties of type dict, current element "
                               "of type %s." % type(description[3]))
    parsed["custom_properties"] = description[3]
  if len(description) > 4:
    raise DataTableException("Description error: tuple of length > 4")
  if parsed["type"] not in ("string", "number", "boolean",
                            "date", "datetime", "timeofday"):
    raise DataTableException(
        "Description error: unsupported type '%s'" % parsed["type"])
  return parsed
@staticmethod
def TableDescriptionParser(table_description, depth=0):
  """Parses the table_description object for internal use.

  Parses the user-submitted table description into an internal format used
  by the Python DataTable class. Returns the flat list of parsed columns.

  Args:
    table_description: A description of the table which should comply
        with one of the formats described below.
    depth: Optional. The depth of the first level in the current
        description. Used by recursive calls to this function.

  Returns:
    List of columns, where each column is represented by a dictionary with
    the keys: id, label, type, depth, container, custom_properties:
      - id: the id of the column
      - label: the display name of the column
      - type: The datatype of the elements in this column. Allowed types
        are described in ColumnTypeParser().
      - depth: The depth of this column in the table description
      - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
      - custom_properties: The custom properties for this column.
    The returned description is flattened regardless of how it was given.

  Raises:
    DataTableException: Error in a column description or in the
        description structure.

  Examples:
    A column description can be of the following forms:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
    or as a dictionary:
     'id': 'type'
     'id': ('type',)
     'id': ('type', 'label')
     'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
    If the type is not specified, we treat it as string.
    If no specific label is given, the label is simply the id.
    If no custom properties are given, we use an empty dictionary.

    input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
    output: [{'id': 'a', 'label': 'a', 'type': 'date',
              'depth': 0, 'container': 'iter', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'timeofday',
              'depth': 0, 'container': 'iter',
              'custom_properties': {'foo': 'bar'}}]

    input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
    output: [{'id': 'a', 'label': 'a', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'iter', 'custom_properties': {}},
             {'id': 'c', 'label': 'column c', 'type': 'string',
              'depth': 1, 'container': 'iter', 'custom_properties': {}}]

    input: {('a', 'number', 'column a'): {'b': 'number', 'c': 'string'}}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'c', 'type': 'string',
              'depth': 1, 'container': 'dict', 'custom_properties': {}}]

    input: {('w', 'string', 'word'): ('c', 'number', 'count')}
    output: [{'id': 'w', 'label': 'word', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'count', 'type': 'number',
              'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

    input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
              'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
              'container': 'dict', 'custom_properties': {}}]

  NOTE: there might be ambiguity in the case of a dictionary representation
  of a single column. For example, the following description can be parsed
  in 2 different ways: {'a': ('b', 'c')} can be thought of as a single
  column with the id 'a', of type 'b' and the label 'c', or as 2 columns:
  one named 'a', and the other named 'b' of type 'c'. We choose the first
  option by default, and in case the second option is the right one, it is
  possible to make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add
  more info into the tuple, thus making it look like this:
  {'a': ('b', 'c', 'b', {})} -- second 'b' is the label, and {} is the
  custom properties field.
  """
  # Recursion base case: a scalar column description (string or tuple).
  if isinstance(table_description, (six.string_types, tuple)):
    parsed_col = DataTable.ColumnTypeParser(table_description)
    parsed_col["depth"] = depth
    parsed_col["container"] = "scalar"
    return [parsed_col]

  # Since it is not scalar, table_description must be iterable.
  if not hasattr(table_description, "__iter__"):
    raise DataTableException("Expected an iterable object, got %s" %
                             type(table_description))
  if not isinstance(table_description, dict):
    # A non-dictionary iterable: every element is one column at this depth.
    columns = []
    for desc in table_description:
      parsed_col = DataTable.ColumnTypeParser(desc)
      parsed_col["depth"] = depth
      parsed_col["container"] = "iter"
      columns.append(parsed_col)
    if not columns:
      raise DataTableException("Description iterable objects should not"
                               " be empty.")
    return columns
  # The other case is a dictionary
  if not table_description:
    raise DataTableException("Empty dictionaries are not allowed inside"
                             " description")

  # To differentiate between the two cases of more levels below or this is
  # the most inner dictionary, we consider the number of keys (more than one
  # key is an indication of the most inner dictionary) and the type of the
  # key and value in case of only 1 key (if the type of the key is string
  # and the type of the value is a tuple of 0-3 items, we assume this is the
  # most inner dictionary).
  # NOTE: this way of differentiating might create ambiguity. See docs.
  if (len(table_description) != 1 or
      (isinstance(next(six.iterkeys(table_description)), six.string_types) and
       isinstance(next(six.itervalues(table_description)), tuple) and
       len(next(six.itervalues(table_description))) < 4)):
    # This is the most inner dictionary. Parsing types.
    columns = []
    # We sort the items, equivalent to sorting the keys since they are unique
    for key, value in sorted(table_description.items()):
      # We parse the column type as (key, type) or (key, type, label) using
      # ColumnTypeParser.
      if isinstance(value, tuple):
        parsed_col = DataTable.ColumnTypeParser((key,) + value)
      else:
        parsed_col = DataTable.ColumnTypeParser((key, value))
      parsed_col["depth"] = depth
      parsed_col["container"] = "dict"
      columns.append(parsed_col)
    return columns
  # This is an outer dictionary: its single key describes one column at this
  # depth and its value describes the deeper levels, parsed recursively.
  parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
  parsed_col["depth"] = depth
  parsed_col["container"] = "dict"
  return ([parsed_col] + DataTable.TableDescriptionParser(
      sorted(table_description.values())[0], depth=depth + 1))
@property
def columns(self):
  """The flattened list of parsed column descriptions (read-only)."""
  return self.__columns
def NumberOfRows(self):
  """Returns the number of data rows currently stored in the table."""
  return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
  """Sets custom properties on one row index or an iterable of indices.

  The given dictionary replaces whatever custom properties were previously
  attached to each targeted row.

  Args:
    rows: A single row index, or an iterable of row indices, to update.
    custom_properties: A string-to-string dict of custom properties to set
        on every listed row.
  """
  if not hasattr(rows, "__iter__"):
    rows = [rows]
  for index in rows:
    self.__data[index] = (self.__data[index][0], custom_properties)
def LoadData(self, data, custom_properties=None):
  """Replaces the table contents with the given rows.

  Equivalent to clearing all rows and then calling
  AppendData(data, custom_properties): the optional custom-properties dict
  is applied to *every* newly added row.

  Args:
    data: The rows that the table will contain.
    custom_properties: Optional string-to-string dict used as the custom
        properties of all added rows.
  """
  self.__data = []
  self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
  """Appends new rows to the table.

  The data must conform to the table schema passed to __init__(); see
  CoerceValue() for acceptable cell types and the class documentation for
  schema/data examples.

  Args:
    data: The rows to add, in the shape dictated by the table description.
    custom_properties: Optional string-to-string dict attached as the
        custom properties of every appended row.

  Raises:
    DataTableException: The data structure does not match the description.
  """
  # A nested schema (max depth > 0) hands the whole structure to the
  # recursive helper, which walks the levels itself; a flat schema means
  # `data` is an iterable of rows, each inserted separately.
  if self.__columns[-1]["depth"]:
    self._InnerAppendData(({}, custom_properties), data, 0)
  else:
    for row in data:
      self._InnerAppendData(({}, custom_properties), row, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
    """Recursively flatten one (possibly nested) data item into rows.

    Inner helper for LoadData/AppendData. Walks ``data`` according to the
    column description, accumulating column values, and appends completed
    rows to self.__data.

    Args:
      prev_col_values: Tuple of (column-id -> value dict, custom_properties)
          accumulated from the outer nesting levels.
      data: The remaining data for columns ``col_index`` onwards; its shape
          (scalar, iterable or dict) must match the column "container" type.
      col_index: Index of the column the current level of ``data`` fills.

    Raises:
      DataTableException: ``data`` does not match the table description.
    """
    # We first check that col_index has not exceeded the columns size
    if col_index >= len(self.__columns):
        raise DataTableException("The data does not match description, too deep")
    # Dealing with the scalar case, the data is the last value.
    if self.__columns[col_index]["container"] == "scalar":
        prev_col_values[0][self.__columns[col_index]["id"]] = data
        self.__data.append(prev_col_values)
        return
    if self.__columns[col_index]["container"] == "iter":
        # Dicts are iterable too, so they must be excluded explicitly here.
        if not hasattr(data, "__iter__") or isinstance(data, dict):
            raise DataTableException("Expected iterable object, got %s" %
                                     type(data))
        # We only need to insert the rest of the columns
        # If there are less items than expected, we only add what there is.
        for value in data:
            if col_index >= len(self.__columns):
                raise DataTableException("Too many elements given in data")
            prev_col_values[0][self.__columns[col_index]["id"]] = value
            col_index += 1
        self.__data.append(prev_col_values)
        return
    # We know the current level is a dictionary, we verify the type.
    if not isinstance(data, dict):
        raise DataTableException("Expected dictionary at current level, got %s" %
                                 type(data))
    # We check if this is the last level
    if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
        # We need to add the keys in the dictionary as they are
        for col in self.__columns[col_index:]:
            if col["id"] in data:
                prev_col_values[0][col["id"]] = data[col["id"]]
        self.__data.append(prev_col_values)
        return
    # We have a dictionary in an inner depth level.
    if not data.keys():
        # In case this is an empty dictionary, we add a record with the columns
        # filled only until this point.
        self.__data.append(prev_col_values)
    else:
        for key in sorted(data):
            # Copy the accumulated values so sibling keys don't share state.
            col_values = dict(prev_col_values[0])
            col_values[self.__columns[col_index]["id"]] = key
            self._InnerAppendData((col_values, prev_col_values[1]),
                                  data[key], col_index + 1)
def _PreparedData(self, order_by=()):
    """Prepare the data for enumeration - sorting it by order_by.

    Args:
      order_by: Optional. Specifies the name of the column(s) to sort by, and
          (optionally) which direction to sort in. Default sort direction
          is asc. Following formats are accepted:
          "string_col_name" -- For a single key in default (asc) order.
          ("string_col_name", "asc|desc") -- For a single key.
          [("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
          one column, an array of tuples of (col_name, "asc|desc").

    Returns:
      The data sorted by the keys given.

    Raises:
      DataTableException: Sort direction not in 'asc' or 'desc'
    """
    if not order_by:
        # Nothing to sort by; return the stored rows as-is.
        return self.__data
    # Sort a copy so self.__data keeps its insertion order.
    sorted_data = self.__data[:]
    # Normalize the single-key forms ("col" or ("col", "asc|desc"))
    # into a one-element tuple of keys.
    if isinstance(order_by, six.string_types) or (
        isinstance(order_by, tuple) and len(order_by) == 2 and
        order_by[1].lower() in ["asc", "desc"]):
        order_by = (order_by,)
    # Sort by the keys in reverse order; because Python's sort is stable,
    # this yields a multi-key sort with the first key most significant.
    for key in reversed(order_by):
        if isinstance(key, six.string_types):
            sorted_data.sort(key=lambda x: x[0].get(key))
        elif (isinstance(key, (list, tuple)) and len(key) == 2 and
              key[1].lower() in ("asc", "desc")):
            key_func = lambda x: x[0].get(key[0])
            sorted_data.sort(key=key_func, reverse=key[1].lower() != "asc")
        else:
            raise DataTableException("Expected tuple with second value: "
                                     "'asc' or 'desc'")
    return sorted_data
def ToJSCode(self, name, columns_order=None, order_by=()):
    """Write the data table as a JS code string.

    The returned string of JS code can be run to generate a DataTable with
    the stored data. Typically used for debugging only.

    Args:
      name: The name of the table; used as the DataTable's variable name in
          the created JS code.
      columns_order: Optional. Specifies the order of columns in the output
          table. Specify a list of all column IDs in the order in which you
          want the table created. Note that you must list all column IDs in
          this parameter, if you use it.
      order_by: Optional. Specifies the name of the column(s) to sort by.
          Passed as is to _PreparedData.

    Returns:
      A string of JS code that, when run, generates a DataTable with the
      given name and the data stored in this object, e.g.:

        "var tab1 = new google.visualization.DataTable();
         tab1.addColumn("string", "a", "a");
         tab1.addRows(10);
         tab1.setCell(0, 0, "a");
         ..."

    Raises:
      DataTableException: The data does not match the type.
    """
    encoder = DataTableJSONEncoder()
    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    # Map column id -> full column description for quick lookup.
    col_dict = dict([(col["id"], col) for col in self.__columns])
    # We first create the table with the given name
    jscode = "var %s = new google.visualization.DataTable();\n" % name
    if self.custom_properties:
        jscode += "%s.setTableProperties(%s);\n" % (
            name, encoder.encode(self.custom_properties))
    # We add the columns to the table
    for i, col in enumerate(columns_order):
        jscode += "%s.addColumn(%s, %s, %s);\n" % (
            name,
            encoder.encode(col_dict[col]["type"]),
            encoder.encode(col_dict[col]["label"]),
            encoder.encode(col_dict[col]["id"]))
        if col_dict[col]["custom_properties"]:
            jscode += "%s.setColumnProperties(%d, %s);\n" % (
                name, i, encoder.encode(col_dict[col]["custom_properties"]))
    jscode += "%s.addRows(%d);\n" % (name, len(self.__data))
    # We now go over the data and add each row
    for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
        # We add all the elements of this row by their order
        for (j, col) in enumerate(columns_order):
            # Missing/None cells are simply left unset in the JS table.
            if col not in row or row[col] is None:
                continue
            value = self.CoerceValue(row[col], col_dict[col]["type"])
            if isinstance(value, tuple):
                cell_cp = ""
                if len(value) == 3:
                    cell_cp = ", %s" % encoder.encode(row[col][2])
                # We have a formatted value or custom property as well
                jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                           (name, i, j,
                            self.EscapeForJSCode(encoder, value[0]),
                            self.EscapeForJSCode(encoder, value[1]), cell_cp))
            else:
                jscode += "%s.setCell(%d, %d, %s);\n" % (
                    name, i, j, self.EscapeForJSCode(encoder, value))
        if cp:
            jscode += "%s.setRowProperties(%d, %s);\n" % (
                name, i, encoder.encode(cp))
    return jscode
def ToHtml(self, columns_order=None, order_by=()):
    """Write the data table as an HTML table code string.

    Args:
      columns_order: Optional. Specifies the order of columns in the output
          table. Specify a list of all column IDs in the order in which you
          want the table created. Note that you must list all column IDs in
          this parameter, if you use it.
      order_by: Optional. Specifies the name of the column(s) to sort by.
          Passed as is to _PreparedData.

    Returns:
      An HTML table code string, e.g. (without the newlines):

        <html><body><table border="1">
        <thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
        <tbody>
        <tr><td>1</td><td>"z"</td><td>2</td></tr>
        <tr><td>"3$"</td><td>"w"</td><td></td></tr>
        </tbody>
        </table></body></html>

    Raises:
      DataTableException: The data does not match the type.
    """
    # Templates for each nesting level of the generated document.
    table_template = "<html><body><table border=\"1\">%s</table></body></html>"
    columns_template = "<thead><tr>%s</tr></thead>"
    rows_template = "<tbody>%s</tbody>"
    row_template = "<tr>%s</tr>"
    header_cell_template = "<th>%s</th>"
    cell_template = "<td>%s</td>"
    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    # Map column id -> full column description for quick lookup.
    col_dict = dict([(col["id"], col) for col in self.__columns])
    columns_list = []
    for col in columns_order:
        # Labels are HTML-escaped to keep the generated markup valid.
        columns_list.append(header_cell_template %
                            html.escape(col_dict[col]["label"]))
    columns_html = columns_template % "".join(columns_list)
    rows_list = []
    # We now go over the data and add each row
    for row, unused_cp in self._PreparedData(order_by):
        cells_list = []
        # We add all the elements of this row by their order
        for col in columns_order:
            # For empty string we want empty quotes ("").
            value = ""
            if col in row and row[col] is not None:
                value = self.CoerceValue(row[col], col_dict[col]["type"])
            if isinstance(value, tuple):
                # We have a formatted value and we're going to use it
                cells_list.append(cell_template % html.escape(self.ToString(value[1])))
            else:
                cells_list.append(cell_template % html.escape(self.ToString(value)))
        rows_list.append(row_template % "".join(cells_list))
    rows_html = rows_template % "".join(rows_list)
    return table_template % (columns_html + rows_html)
def ToCsv(self, columns_order=None, order_by=(), separator=","):
    """Write the data table as a CSV string.

    Output is encoded in UTF-8 because the Python "csv" module can't handle
    Unicode properly according to its documentation.

    Args:
      columns_order: Optional. Specifies the order of columns in the output
          table. Specify a list of all column IDs in the order in which you
          want the table created. Note that you must list all column IDs in
          this parameter, if you use it.
      order_by: Optional. Specifies the name of the column(s) to sort by.
          Passed as is to _PreparedData.
      separator: Optional. The separator to use between the values.

    Returns:
      A CSV string representing the table, e.g.:

        'a','b','c'
        1,'z',2
        3,'w',''

    Raises:
      DataTableException: The data does not match the type.
    """
    csv_buffer = six.StringIO()
    writer = csv.writer(csv_buffer, delimiter=separator)
    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    # Map column id -> full column description for quick lookup.
    col_dict = dict([(col["id"], col) for col in self.__columns])

    def ensure_str(s):
        "Compatibility function. Ensures using of str rather than unicode."
        # On Python 3 every str passes the isinstance check; on Python 2
        # unicode strings are encoded to UTF-8 bytes for the csv module.
        if isinstance(s, str):
            return s
        return s.encode("utf-8")

    writer.writerow([ensure_str(col_dict[col]["label"])
                     for col in columns_order])
    # We now go over the data and add each row
    for row, unused_cp in self._PreparedData(order_by):
        cells_list = []
        # We add all the elements of this row by their order
        for col in columns_order:
            value = ""
            if col in row and row[col] is not None:
                value = self.CoerceValue(row[col], col_dict[col]["type"])
            if isinstance(value, tuple):
                # We have a formatted value. Using it only for date/time types.
                if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
                    cells_list.append(ensure_str(self.ToString(value[1])))
                else:
                    cells_list.append(ensure_str(self.ToString(value[0])))
            else:
                cells_list.append(ensure_str(self.ToString(value)))
        writer.writerow(cells_list)
    return csv_buffer.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
    """Return the table as tab-separated values readable by MS Excel.

    Args:
      columns_order: Delegated to ToCsv.
      order_by: Delegated to ToCsv.

    Returns:
      A little-endian UTF-16 encoded byte string with tab-separated values.
    """
    tsv = self.ToCsv(columns_order, order_by, separator="\t")
    # ToCsv may hand back UTF-8 bytes under Python 2; promote to text so
    # the final transcode to UTF-16LE is well-defined.
    if isinstance(tsv, six.text_type):
        text = tsv
    else:
        text = tsv.decode("utf-8")
    return text.encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
    """Return an object suitable to be converted to JSON.

    Builds the {"cols": [...], "rows": [...]} structure consumed by
    ToJSon and ToJSonResponse.

    Args:
      columns_order: Optional. A list of all column IDs in the order in which
          you want them created in the output table. If specified, all
          column IDs must be present.
      order_by: Optional. Specifies the name of the column(s) to sort by.
          Passed as is to _PreparedData().

    Returns:
      A dictionary object for use by ToJSon or ToJSonResponse.
    """
    if columns_order is None:
        columns_order = [col["id"] for col in self.__columns]
    # Map column id -> full column description for quick lookup.
    col_dict = dict([(col["id"], col) for col in self.__columns])
    # Creating the column JSON objects
    col_objs = []
    for col_id in columns_order:
        col_obj = {"id": col_dict[col_id]["id"],
                   "label": col_dict[col_id]["label"],
                   "type": col_dict[col_id]["type"]}
        if col_dict[col_id]["custom_properties"]:
            # "p" carries the per-column custom properties.
            col_obj["p"] = col_dict[col_id]["custom_properties"]
        col_objs.append(col_obj)
    # Creating the rows jsons
    row_objs = []
    for row, cp in self._PreparedData(order_by):
        cell_objs = []
        for col in columns_order:
            value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
            if value is None:
                # Missing cells are represented by JSON null.
                cell_obj = None
            elif isinstance(value, tuple):
                # "v" is the raw value, "f" the formatted value,
                # "p" the per-cell custom properties.
                cell_obj = {"v": value[0]}
                if len(value) > 1 and value[1] is not None:
                    cell_obj["f"] = value[1]
                if len(value) == 3:
                    cell_obj["p"] = value[2]
            else:
                cell_obj = {"v": value}
            cell_objs.append(cell_obj)
        row_obj = {"c": cell_objs}
        if cp:
            row_obj["p"] = cp
        row_objs.append(row_obj)
    json_obj = {"cols": col_objs, "rows": row_objs}
    if self.custom_properties:
        json_obj["p"] = self.custom_properties
    return json_obj
def ToJSon(self, columns_order=None, order_by=()):
    """Return a string usable in a JS DataTable constructor.

    Writes a JSON string that can be passed directly into a Google
    Visualization API DataTable constructor, e.g.:

      var data = new google.visualization.DataTable(_my_JSon_string, 0.6);

    Args:
      columns_order: Optional. Specifies the order of columns in the output
          table; a list of all column IDs in the order in which you want the
          table created. All column IDs must be listed if used.
      order_by: Optional. Specifies the name of the column(s) to sort by.
          Passed as is to _PreparedData().

    Returns:
      A JSon constructor string generating a JS DataTable with this
      object's data, e.g. (without newlines):

        {cols: [{id:"a",label:"a",type:"number"},
                {id:"b",label:"b",type:"string"}],
         rows: [{c:[{v:1},{v:"z"}]}, {c:[{v:3,f:"3$"},{v:"w"}]}],
         p: {'foo': 'bar'}}

    Raises:
      DataTableException: The data does not match the type.
    """
    json_obj = self._ToJSonObj(columns_order, order_by)
    encoded = DataTableJSONEncoder().encode(json_obj)
    # Python 2 may produce a unicode string; callers expect UTF-8 bytes
    # there, while Python 3 returns the str unchanged.
    if isinstance(encoded, str):
        return encoded
    return encoded.encode("utf-8")
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
    """Write the table as a JSON response returnable as-is to a client.

    Produces the wire format expected by a Google Visualization API Query
    object; the client translates it back into a DataTable. The URL
    returning this string can be used as a data source by Google
    Visualization Gadgets or from JS code.

    Args:
      columns_order: Optional. Passed straight to self._ToJSonObj().
      order_by: Optional. Passed straight to self._ToJSonObj().
      req_id: Optional. The response id, as retrieved by the request.
      response_handler: Optional. The response handler, as retrieved by
          the request.

    Returns:
      A JSON response string, e.g. (newlines added for readability):

        google.visualization.Query.setResponse({
            'version':'0.6', 'reqId':'0', 'status':'OK',
            'table': {cols: [...], rows: [...]}});
    """
    payload = {
        "version": "0.6",
        "reqId": str(req_id),
        "table": self._ToJSonObj(columns_order, order_by),
        "status": "ok",
    }
    encoded = DataTableJSONEncoder().encode(payload)
    # Normalize to UTF-8 bytes on Python 2; Python 3 strings pass through.
    if not isinstance(encoded, str):
        encoded = encoded.encode("utf-8")
    return "%s(%s);" % (response_handler, encoded)
|
inveniosoftware/invenio-records
|
invenio_records/alembic/07fb52561c5c_alter_column_from_json_to_jsonb.py
|
upgrade
|
python
|
def upgrade():
if op._proxy.migration_context.dialect.name == 'postgresql':
op.alter_column(
'records_metadata',
'json',
type_=sa.dialects.postgresql.JSONB,
postgresql_using='json::text::jsonb'
)
|
Upgrade database.
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/alembic/07fb52561c5c_alter_column_from_json_to_jsonb.py#L21-L29
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Alter column from json to jsonb."""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '07fb52561c5c'
down_revision = '862037093962'
branch_labels = ()
depends_on = None
def downgrade():
"""Downgrade database."""
if op._proxy.migration_context.dialect.name == 'postgresql':
op.alter_column(
'records_metadata',
'json',
type_=sa.dialects.postgresql.JSON,
postgresql_using='json::text::json'
)
|
inveniosoftware/invenio-records
|
invenio_records/alembic/07fb52561c5c_alter_column_from_json_to_jsonb.py
|
downgrade
|
python
|
def downgrade():
if op._proxy.migration_context.dialect.name == 'postgresql':
op.alter_column(
'records_metadata',
'json',
type_=sa.dialects.postgresql.JSON,
postgresql_using='json::text::json'
)
|
Downgrade database.
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/alembic/07fb52561c5c_alter_column_from_json_to_jsonb.py#L32-L40
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Alter column from json to jsonb."""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '07fb52561c5c'
down_revision = '862037093962'
branch_labels = ()
depends_on = None
def upgrade():
"""Upgrade database."""
if op._proxy.migration_context.dialect.name == 'postgresql':
op.alter_column(
'records_metadata',
'json',
type_=sa.dialects.postgresql.JSONB,
postgresql_using='json::text::jsonb'
)
|
inveniosoftware/invenio-records
|
invenio_records/api.py
|
RecordBase.validate
|
python
|
def validate(self, **kwargs):
r"""Validate record according to schema defined in ``$schema`` key.
:Keyword Arguments:
* **format_checker** --
A ``format_checker`` is an instance of class
:class:`jsonschema.FormatChecker` containing business logic to
validate arbitrary formats. For example:
>>> from jsonschema import FormatChecker
>>> from jsonschema.validators import validate
>>> checker = FormatChecker()
>>> checker.checks('foo')(lambda el: el.startswith('foo'))
<function <lambda> at ...>
>>> validate('foo', {'format': 'foo'}, format_checker=checker)
returns ``None``, which means that the validation was successful,
while
>>> validate('bar', {'format': 'foo'},
... format_checker=checker) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValidationError: 'bar' is not a 'foo'
...
raises a :class:`jsonschema.exceptions.ValidationError`.
* **validator** --
A :class:`jsonschema.IValidator` class used for record validation.
It will be used as `cls` argument when calling
:func:`jsonschema.validate`. For example
>>> from jsonschema.validators import extend, Draft4Validator
>>> NoRequiredValidator = extend(
... Draft4Validator,
... validators={'required': lambda v, r, i, s: None}
... )
>>> schema = {
... 'type': 'object',
... 'properties': {
... 'name': { 'type': 'string' },
... 'email': { 'type': 'string' },
... 'address': {'type': 'string' },
... 'telephone': { 'type': 'string' }
... },
... 'required': ['name', 'email']
... }
>>> from jsonschema.validators import validate
>>> validate({}, schema, NoRequiredValidator)
returns ``None``, which means that the validation was successful,
while
>>> validate({}, schema) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValidationError: 'name' is a required property
...
raises a :class:`jsonschema.exceptions.ValidationError`.
"""
if '$schema' in self and self['$schema'] is not None:
kwargs['cls'] = kwargs.pop('validator', None)
_records_state.validate(self, self['$schema'], **kwargs)
|
r"""Validate record according to schema defined in ``$schema`` key.
:Keyword Arguments:
* **format_checker** --
A ``format_checker`` is an instance of class
:class:`jsonschema.FormatChecker` containing business logic to
validate arbitrary formats. For example:
>>> from jsonschema import FormatChecker
>>> from jsonschema.validators import validate
>>> checker = FormatChecker()
>>> checker.checks('foo')(lambda el: el.startswith('foo'))
<function <lambda> at ...>
>>> validate('foo', {'format': 'foo'}, format_checker=checker)
returns ``None``, which means that the validation was successful,
while
>>> validate('bar', {'format': 'foo'},
... format_checker=checker) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValidationError: 'bar' is not a 'foo'
...
raises a :class:`jsonschema.exceptions.ValidationError`.
* **validator** --
A :class:`jsonschema.IValidator` class used for record validation.
It will be used as `cls` argument when calling
:func:`jsonschema.validate`. For example
>>> from jsonschema.validators import extend, Draft4Validator
>>> NoRequiredValidator = extend(
... Draft4Validator,
... validators={'required': lambda v, r, i, s: None}
... )
>>> schema = {
... 'type': 'object',
... 'properties': {
... 'name': { 'type': 'string' },
... 'email': { 'type': 'string' },
... 'address': {'type': 'string' },
... 'telephone': { 'type': 'string' }
... },
... 'required': ['name', 'email']
... }
>>> from jsonschema.validators import validate
>>> validate({}, schema, NoRequiredValidator)
returns ``None``, which means that the validation was successful,
while
>>> validate({}, schema) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValidationError: 'name' is a required property
...
raises a :class:`jsonschema.exceptions.ValidationError`.
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/api.py#L62-L126
| null |
class RecordBase(dict):
"""Base class for Record and RecordBase."""
def __init__(self, data, model=None):
"""Initialize instance with dictionary data and SQLAlchemy model.
:param data: Dict with record metadata.
:param model: :class:`~invenio_records.models.RecordMetadata` instance.
"""
self.model = model
super(RecordBase, self).__init__(data or {})
@property
def id(self):
"""Get model identifier."""
return self.model.id if self.model else None
@property
def revision_id(self):
"""Get revision identifier."""
return self.model.version_id-1 if self.model else None
@property
def created(self):
"""Get creation timestamp."""
return self.model.created if self.model else None
@property
def updated(self):
"""Get last updated timestamp."""
return self.model.updated if self.model else None
def replace_refs(self):
"""Replace the ``$ref`` keys within the JSON."""
return _records_state.replace_refs(self)
def dumps(self, **kwargs):
"""Return pure Python dictionary with record metadata."""
return deepcopy(dict(self))
|
inveniosoftware/invenio-records
|
invenio_records/api.py
|
Record.create
|
python
|
def create(cls, data, id_=None, **kwargs):
r"""Create a new record instance and store it in the database.
#. Send a signal :data:`invenio_records.signals.before_record_insert`
with the new record as parameter.
#. Validate the new record data.
#. Add the new record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_insert`
with the new created record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:param data: Dict with the record metadata.
:param id_: Specify a UUID to use for the new record, instead of
automatically generated.
:returns: A new :class:`Record` instance.
"""
from .models import RecordMetadata
with db.session.begin_nested():
record = cls(data)
before_record_insert.send(
current_app._get_current_object(),
record=record
)
record.validate(**kwargs)
record.model = RecordMetadata(id=id_, json=record)
db.session.add(record.model)
after_record_insert.send(
current_app._get_current_object(),
record=record
)
return record
|
r"""Create a new record instance and store it in the database.
#. Send a signal :data:`invenio_records.signals.before_record_insert`
with the new record as parameter.
#. Validate the new record data.
#. Add the new record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_insert`
with the new created record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:param data: Dict with the record metadata.
:param id_: Specify a UUID to use for the new record, instead of
automatically generated.
:returns: A new :class:`Record` instance.
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/api.py#L141-L189
|
[
"def validate(self, **kwargs):\n r\"\"\"Validate record according to schema defined in ``$schema`` key.\n\n :Keyword Arguments:\n * **format_checker** --\n A ``format_checker`` is an instance of class\n :class:`jsonschema.FormatChecker` containing business logic to\n validate arbitrary formats. For example:\n\n >>> from jsonschema import FormatChecker\n >>> from jsonschema.validators import validate\n >>> checker = FormatChecker()\n >>> checker.checks('foo')(lambda el: el.startswith('foo'))\n <function <lambda> at ...>\n >>> validate('foo', {'format': 'foo'}, format_checker=checker)\n\n returns ``None``, which means that the validation was successful,\n while\n\n >>> validate('bar', {'format': 'foo'},\n ... format_checker=checker) # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n ValidationError: 'bar' is not a 'foo'\n ...\n\n raises a :class:`jsonschema.exceptions.ValidationError`.\n\n * **validator** --\n A :class:`jsonschema.IValidator` class used for record validation.\n It will be used as `cls` argument when calling\n :func:`jsonschema.validate`. For example\n\n >>> from jsonschema.validators import extend, Draft4Validator\n >>> NoRequiredValidator = extend(\n ... Draft4Validator,\n ... validators={'required': lambda v, r, i, s: None}\n ... )\n >>> schema = {\n ... 'type': 'object',\n ... 'properties': {\n ... 'name': { 'type': 'string' },\n ... 'email': { 'type': 'string' },\n ... 'address': {'type': 'string' },\n ... 'telephone': { 'type': 'string' }\n ... },\n ... 'required': ['name', 'email']\n ... 
}\n >>> from jsonschema.validators import validate\n >>> validate({}, schema, NoRequiredValidator)\n\n returns ``None``, which means that the validation was successful,\n while\n\n >>> validate({}, schema) # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n ValidationError: 'name' is a required property\n ...\n\n raises a :class:`jsonschema.exceptions.ValidationError`.\n \"\"\"\n if '$schema' in self and self['$schema'] is not None:\n kwargs['cls'] = kwargs.pop('validator', None)\n _records_state.validate(self, self['$schema'], **kwargs)\n"
] |
class Record(RecordBase):
"""Define API for metadata creation and manipulation."""
@classmethod
@classmethod
def get_record(cls, id_, with_deleted=False):
"""Retrieve the record by id.
Raise a database exception if the record does not exist.
:param id_: record ID.
:param with_deleted: If `True` then it includes deleted records.
:returns: The :class:`Record` instance.
"""
with db.session.no_autoflush:
query = RecordMetadata.query.filter_by(id=id_)
if not with_deleted:
query = query.filter(RecordMetadata.json != None) # noqa
obj = query.one()
return cls(obj.json, model=obj)
@classmethod
def get_records(cls, ids, with_deleted=False):
"""Retrieve multiple records by id.
:param ids: List of record IDs.
:param with_deleted: If `True` then it includes deleted records.
:returns: A list of :class:`Record` instances.
"""
with db.session.no_autoflush:
query = RecordMetadata.query.filter(RecordMetadata.id.in_(ids))
if not with_deleted:
query = query.filter(RecordMetadata.json != None) # noqa
return [cls(obj.json, model=obj) for obj in query.all()]
def patch(self, patch):
"""Patch record metadata.
:params patch: Dictionary of record metadata.
:returns: A new :class:`Record` instance.
"""
data = apply_patch(dict(self), patch)
return self.__class__(data, model=self.model)
def commit(self, **kwargs):
r"""Store changes of the current record instance in the database.
#. Send a signal :data:`invenio_records.signals.before_record_update`
with the current record to be committed as parameter.
#. Validate the current record data.
#. Commit the current record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_update`
with the committed record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:returns: The :class:`Record` instance.
"""
if self.model is None or self.model.json is None:
raise MissingModelError()
with db.session.begin_nested():
before_record_update.send(
current_app._get_current_object(),
record=self
)
self.validate(**kwargs)
self.model.json = dict(self)
flag_modified(self.model, 'json')
db.session.merge(self.model)
after_record_update.send(
current_app._get_current_object(),
record=self
)
return self
def delete(self, force=False):
"""Delete a record.
If `force` is ``False``, the record is soft-deleted: record data will
be deleted but the record identifier and the history of the record will
be kept. This ensures that the same record identifier cannot be used
twice, and that you can still retrieve its history. If `force` is
``True``, then the record is completely deleted from the database.
#. Send a signal :data:`invenio_records.signals.before_record_delete`
with the current record as parameter.
#. Delete or soft-delete the current record.
#. Send a signal :data:`invenio_records.signals.after_record_delete`
with the current deleted record as parameter.
:param force: if ``True``, completely deletes the current record from
the database, otherwise soft-deletes it.
:returns: The deleted :class:`Record` instance.
"""
if self.model is None:
raise MissingModelError()
with db.session.begin_nested():
before_record_delete.send(
current_app._get_current_object(),
record=self
)
if force:
db.session.delete(self.model)
else:
self.model.json = None
db.session.merge(self.model)
after_record_delete.send(
current_app._get_current_object(),
record=self
)
return self
def revert(self, revision_id):
"""Revert the record to a specific revision.
#. Send a signal :data:`invenio_records.signals.before_record_revert`
with the current record as parameter.
#. Revert the record to the revision id passed as parameter.
#. Send a signal :data:`invenio_records.signals.after_record_revert`
with the reverted record as parameter.
:param revision_id: Specify the record revision id
:returns: The :class:`Record` instance corresponding to the revision id
"""
if self.model is None:
raise MissingModelError()
revision = self.revisions[revision_id]
with db.session.begin_nested():
before_record_revert.send(
current_app._get_current_object(),
record=self
)
self.model.json = dict(revision)
db.session.merge(self.model)
after_record_revert.send(
current_app._get_current_object(),
record=self
)
return self.__class__(self.model.json, model=self.model)
@property
def revisions(self):
"""Get revisions iterator."""
if self.model is None:
raise MissingModelError()
return RevisionsIterator(self.model)
|
inveniosoftware/invenio-records
|
invenio_records/api.py
|
Record.get_record
|
python
|
def get_record(cls, id_, with_deleted=False):
with db.session.no_autoflush:
query = RecordMetadata.query.filter_by(id=id_)
if not with_deleted:
query = query.filter(RecordMetadata.json != None) # noqa
obj = query.one()
return cls(obj.json, model=obj)
|
Retrieve the record by id.
Raise a database exception if the record does not exist.
:param id_: record ID.
:param with_deleted: If `True` then it includes deleted records.
:returns: The :class:`Record` instance.
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/api.py#L192-L206
| null |
class Record(RecordBase):
"""Define API for metadata creation and manipulation."""
@classmethod
def create(cls, data, id_=None, **kwargs):
r"""Create a new record instance and store it in the database.
#. Send a signal :data:`invenio_records.signals.before_record_insert`
with the new record as parameter.
#. Validate the new record data.
#. Add the new record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_insert`
with the new created record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:param data: Dict with the record metadata.
:param id_: Specify a UUID to use for the new record, instead of
automatically generated.
:returns: A new :class:`Record` instance.
"""
from .models import RecordMetadata
with db.session.begin_nested():
record = cls(data)
before_record_insert.send(
current_app._get_current_object(),
record=record
)
record.validate(**kwargs)
record.model = RecordMetadata(id=id_, json=record)
db.session.add(record.model)
after_record_insert.send(
current_app._get_current_object(),
record=record
)
return record
@classmethod
@classmethod
def get_records(cls, ids, with_deleted=False):
"""Retrieve multiple records by id.
:param ids: List of record IDs.
:param with_deleted: If `True` then it includes deleted records.
:returns: A list of :class:`Record` instances.
"""
with db.session.no_autoflush:
query = RecordMetadata.query.filter(RecordMetadata.id.in_(ids))
if not with_deleted:
query = query.filter(RecordMetadata.json != None) # noqa
return [cls(obj.json, model=obj) for obj in query.all()]
def patch(self, patch):
"""Patch record metadata.
:params patch: Dictionary of record metadata.
:returns: A new :class:`Record` instance.
"""
data = apply_patch(dict(self), patch)
return self.__class__(data, model=self.model)
def commit(self, **kwargs):
r"""Store changes of the current record instance in the database.
#. Send a signal :data:`invenio_records.signals.before_record_update`
with the current record to be committed as parameter.
#. Validate the current record data.
#. Commit the current record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_update`
with the committed record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:returns: The :class:`Record` instance.
"""
if self.model is None or self.model.json is None:
raise MissingModelError()
with db.session.begin_nested():
before_record_update.send(
current_app._get_current_object(),
record=self
)
self.validate(**kwargs)
self.model.json = dict(self)
flag_modified(self.model, 'json')
db.session.merge(self.model)
after_record_update.send(
current_app._get_current_object(),
record=self
)
return self
def delete(self, force=False):
"""Delete a record.
If `force` is ``False``, the record is soft-deleted: record data will
be deleted but the record identifier and the history of the record will
be kept. This ensures that the same record identifier cannot be used
twice, and that you can still retrieve its history. If `force` is
``True``, then the record is completely deleted from the database.
#. Send a signal :data:`invenio_records.signals.before_record_delete`
with the current record as parameter.
#. Delete or soft-delete the current record.
#. Send a signal :data:`invenio_records.signals.after_record_delete`
with the current deleted record as parameter.
:param force: if ``True``, completely deletes the current record from
the database, otherwise soft-deletes it.
:returns: The deleted :class:`Record` instance.
"""
if self.model is None:
raise MissingModelError()
with db.session.begin_nested():
before_record_delete.send(
current_app._get_current_object(),
record=self
)
if force:
db.session.delete(self.model)
else:
self.model.json = None
db.session.merge(self.model)
after_record_delete.send(
current_app._get_current_object(),
record=self
)
return self
def revert(self, revision_id):
"""Revert the record to a specific revision.
#. Send a signal :data:`invenio_records.signals.before_record_revert`
with the current record as parameter.
#. Revert the record to the revision id passed as parameter.
#. Send a signal :data:`invenio_records.signals.after_record_revert`
with the reverted record as parameter.
:param revision_id: Specify the record revision id
:returns: The :class:`Record` instance corresponding to the revision id
"""
if self.model is None:
raise MissingModelError()
revision = self.revisions[revision_id]
with db.session.begin_nested():
before_record_revert.send(
current_app._get_current_object(),
record=self
)
self.model.json = dict(revision)
db.session.merge(self.model)
after_record_revert.send(
current_app._get_current_object(),
record=self
)
return self.__class__(self.model.json, model=self.model)
@property
def revisions(self):
"""Get revisions iterator."""
if self.model is None:
raise MissingModelError()
return RevisionsIterator(self.model)
|
inveniosoftware/invenio-records
|
invenio_records/api.py
|
Record.get_records
|
python
|
def get_records(cls, ids, with_deleted=False):
with db.session.no_autoflush:
query = RecordMetadata.query.filter(RecordMetadata.id.in_(ids))
if not with_deleted:
query = query.filter(RecordMetadata.json != None) # noqa
return [cls(obj.json, model=obj) for obj in query.all()]
|
Retrieve multiple records by id.
:param ids: List of record IDs.
:param with_deleted: If `True` then it includes deleted records.
:returns: A list of :class:`Record` instances.
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/api.py#L209-L221
| null |
class Record(RecordBase):
"""Define API for metadata creation and manipulation."""
@classmethod
def create(cls, data, id_=None, **kwargs):
r"""Create a new record instance and store it in the database.
#. Send a signal :data:`invenio_records.signals.before_record_insert`
with the new record as parameter.
#. Validate the new record data.
#. Add the new record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_insert`
with the new created record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:param data: Dict with the record metadata.
:param id_: Specify a UUID to use for the new record, instead of
automatically generated.
:returns: A new :class:`Record` instance.
"""
from .models import RecordMetadata
with db.session.begin_nested():
record = cls(data)
before_record_insert.send(
current_app._get_current_object(),
record=record
)
record.validate(**kwargs)
record.model = RecordMetadata(id=id_, json=record)
db.session.add(record.model)
after_record_insert.send(
current_app._get_current_object(),
record=record
)
return record
@classmethod
def get_record(cls, id_, with_deleted=False):
"""Retrieve the record by id.
Raise a database exception if the record does not exist.
:param id_: record ID.
:param with_deleted: If `True` then it includes deleted records.
:returns: The :class:`Record` instance.
"""
with db.session.no_autoflush:
query = RecordMetadata.query.filter_by(id=id_)
if not with_deleted:
query = query.filter(RecordMetadata.json != None) # noqa
obj = query.one()
return cls(obj.json, model=obj)
@classmethod
def patch(self, patch):
"""Patch record metadata.
:params patch: Dictionary of record metadata.
:returns: A new :class:`Record` instance.
"""
data = apply_patch(dict(self), patch)
return self.__class__(data, model=self.model)
def commit(self, **kwargs):
r"""Store changes of the current record instance in the database.
#. Send a signal :data:`invenio_records.signals.before_record_update`
with the current record to be committed as parameter.
#. Validate the current record data.
#. Commit the current record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_update`
with the committed record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:returns: The :class:`Record` instance.
"""
if self.model is None or self.model.json is None:
raise MissingModelError()
with db.session.begin_nested():
before_record_update.send(
current_app._get_current_object(),
record=self
)
self.validate(**kwargs)
self.model.json = dict(self)
flag_modified(self.model, 'json')
db.session.merge(self.model)
after_record_update.send(
current_app._get_current_object(),
record=self
)
return self
def delete(self, force=False):
"""Delete a record.
If `force` is ``False``, the record is soft-deleted: record data will
be deleted but the record identifier and the history of the record will
be kept. This ensures that the same record identifier cannot be used
twice, and that you can still retrieve its history. If `force` is
``True``, then the record is completely deleted from the database.
#. Send a signal :data:`invenio_records.signals.before_record_delete`
with the current record as parameter.
#. Delete or soft-delete the current record.
#. Send a signal :data:`invenio_records.signals.after_record_delete`
with the current deleted record as parameter.
:param force: if ``True``, completely deletes the current record from
the database, otherwise soft-deletes it.
:returns: The deleted :class:`Record` instance.
"""
if self.model is None:
raise MissingModelError()
with db.session.begin_nested():
before_record_delete.send(
current_app._get_current_object(),
record=self
)
if force:
db.session.delete(self.model)
else:
self.model.json = None
db.session.merge(self.model)
after_record_delete.send(
current_app._get_current_object(),
record=self
)
return self
def revert(self, revision_id):
"""Revert the record to a specific revision.
#. Send a signal :data:`invenio_records.signals.before_record_revert`
with the current record as parameter.
#. Revert the record to the revision id passed as parameter.
#. Send a signal :data:`invenio_records.signals.after_record_revert`
with the reverted record as parameter.
:param revision_id: Specify the record revision id
:returns: The :class:`Record` instance corresponding to the revision id
"""
if self.model is None:
raise MissingModelError()
revision = self.revisions[revision_id]
with db.session.begin_nested():
before_record_revert.send(
current_app._get_current_object(),
record=self
)
self.model.json = dict(revision)
db.session.merge(self.model)
after_record_revert.send(
current_app._get_current_object(),
record=self
)
return self.__class__(self.model.json, model=self.model)
@property
def revisions(self):
"""Get revisions iterator."""
if self.model is None:
raise MissingModelError()
return RevisionsIterator(self.model)
|
inveniosoftware/invenio-records
|
invenio_records/api.py
|
Record.patch
|
python
|
def patch(self, patch):
data = apply_patch(dict(self), patch)
return self.__class__(data, model=self.model)
|
Patch record metadata.
:params patch: Dictionary of record metadata.
:returns: A new :class:`Record` instance.
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/api.py#L223-L230
| null |
class Record(RecordBase):
"""Define API for metadata creation and manipulation."""
@classmethod
def create(cls, data, id_=None, **kwargs):
r"""Create a new record instance and store it in the database.
#. Send a signal :data:`invenio_records.signals.before_record_insert`
with the new record as parameter.
#. Validate the new record data.
#. Add the new record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_insert`
with the new created record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:param data: Dict with the record metadata.
:param id_: Specify a UUID to use for the new record, instead of
automatically generated.
:returns: A new :class:`Record` instance.
"""
from .models import RecordMetadata
with db.session.begin_nested():
record = cls(data)
before_record_insert.send(
current_app._get_current_object(),
record=record
)
record.validate(**kwargs)
record.model = RecordMetadata(id=id_, json=record)
db.session.add(record.model)
after_record_insert.send(
current_app._get_current_object(),
record=record
)
return record
@classmethod
def get_record(cls, id_, with_deleted=False):
"""Retrieve the record by id.
Raise a database exception if the record does not exist.
:param id_: record ID.
:param with_deleted: If `True` then it includes deleted records.
:returns: The :class:`Record` instance.
"""
with db.session.no_autoflush:
query = RecordMetadata.query.filter_by(id=id_)
if not with_deleted:
query = query.filter(RecordMetadata.json != None) # noqa
obj = query.one()
return cls(obj.json, model=obj)
@classmethod
def get_records(cls, ids, with_deleted=False):
"""Retrieve multiple records by id.
:param ids: List of record IDs.
:param with_deleted: If `True` then it includes deleted records.
:returns: A list of :class:`Record` instances.
"""
with db.session.no_autoflush:
query = RecordMetadata.query.filter(RecordMetadata.id.in_(ids))
if not with_deleted:
query = query.filter(RecordMetadata.json != None) # noqa
return [cls(obj.json, model=obj) for obj in query.all()]
def commit(self, **kwargs):
r"""Store changes of the current record instance in the database.
#. Send a signal :data:`invenio_records.signals.before_record_update`
with the current record to be committed as parameter.
#. Validate the current record data.
#. Commit the current record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_update`
with the committed record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:returns: The :class:`Record` instance.
"""
if self.model is None or self.model.json is None:
raise MissingModelError()
with db.session.begin_nested():
before_record_update.send(
current_app._get_current_object(),
record=self
)
self.validate(**kwargs)
self.model.json = dict(self)
flag_modified(self.model, 'json')
db.session.merge(self.model)
after_record_update.send(
current_app._get_current_object(),
record=self
)
return self
def delete(self, force=False):
"""Delete a record.
If `force` is ``False``, the record is soft-deleted: record data will
be deleted but the record identifier and the history of the record will
be kept. This ensures that the same record identifier cannot be used
twice, and that you can still retrieve its history. If `force` is
``True``, then the record is completely deleted from the database.
#. Send a signal :data:`invenio_records.signals.before_record_delete`
with the current record as parameter.
#. Delete or soft-delete the current record.
#. Send a signal :data:`invenio_records.signals.after_record_delete`
with the current deleted record as parameter.
:param force: if ``True``, completely deletes the current record from
the database, otherwise soft-deletes it.
:returns: The deleted :class:`Record` instance.
"""
if self.model is None:
raise MissingModelError()
with db.session.begin_nested():
before_record_delete.send(
current_app._get_current_object(),
record=self
)
if force:
db.session.delete(self.model)
else:
self.model.json = None
db.session.merge(self.model)
after_record_delete.send(
current_app._get_current_object(),
record=self
)
return self
def revert(self, revision_id):
"""Revert the record to a specific revision.
#. Send a signal :data:`invenio_records.signals.before_record_revert`
with the current record as parameter.
#. Revert the record to the revision id passed as parameter.
#. Send a signal :data:`invenio_records.signals.after_record_revert`
with the reverted record as parameter.
:param revision_id: Specify the record revision id
:returns: The :class:`Record` instance corresponding to the revision id
"""
if self.model is None:
raise MissingModelError()
revision = self.revisions[revision_id]
with db.session.begin_nested():
before_record_revert.send(
current_app._get_current_object(),
record=self
)
self.model.json = dict(revision)
db.session.merge(self.model)
after_record_revert.send(
current_app._get_current_object(),
record=self
)
return self.__class__(self.model.json, model=self.model)
@property
def revisions(self):
"""Get revisions iterator."""
if self.model is None:
raise MissingModelError()
return RevisionsIterator(self.model)
|
inveniosoftware/invenio-records
|
invenio_records/api.py
|
Record.commit
|
python
|
def commit(self, **kwargs):
r"""Store changes of the current record instance in the database.
#. Send a signal :data:`invenio_records.signals.before_record_update`
with the current record to be committed as parameter.
#. Validate the current record data.
#. Commit the current record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_update`
with the committed record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:returns: The :class:`Record` instance.
"""
if self.model is None or self.model.json is None:
raise MissingModelError()
with db.session.begin_nested():
before_record_update.send(
current_app._get_current_object(),
record=self
)
self.validate(**kwargs)
self.model.json = dict(self)
flag_modified(self.model, 'json')
db.session.merge(self.model)
after_record_update.send(
current_app._get_current_object(),
record=self
)
return self
|
r"""Store changes of the current record instance in the database.
#. Send a signal :data:`invenio_records.signals.before_record_update`
with the current record to be committed as parameter.
#. Validate the current record data.
#. Commit the current record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_update`
with the committed record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:returns: The :class:`Record` instance.
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/api.py#L232-L278
|
[
"def validate(self, **kwargs):\n r\"\"\"Validate record according to schema defined in ``$schema`` key.\n\n :Keyword Arguments:\n * **format_checker** --\n A ``format_checker`` is an instance of class\n :class:`jsonschema.FormatChecker` containing business logic to\n validate arbitrary formats. For example:\n\n >>> from jsonschema import FormatChecker\n >>> from jsonschema.validators import validate\n >>> checker = FormatChecker()\n >>> checker.checks('foo')(lambda el: el.startswith('foo'))\n <function <lambda> at ...>\n >>> validate('foo', {'format': 'foo'}, format_checker=checker)\n\n returns ``None``, which means that the validation was successful,\n while\n\n >>> validate('bar', {'format': 'foo'},\n ... format_checker=checker) # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n ValidationError: 'bar' is not a 'foo'\n ...\n\n raises a :class:`jsonschema.exceptions.ValidationError`.\n\n * **validator** --\n A :class:`jsonschema.IValidator` class used for record validation.\n It will be used as `cls` argument when calling\n :func:`jsonschema.validate`. For example\n\n >>> from jsonschema.validators import extend, Draft4Validator\n >>> NoRequiredValidator = extend(\n ... Draft4Validator,\n ... validators={'required': lambda v, r, i, s: None}\n ... )\n >>> schema = {\n ... 'type': 'object',\n ... 'properties': {\n ... 'name': { 'type': 'string' },\n ... 'email': { 'type': 'string' },\n ... 'address': {'type': 'string' },\n ... 'telephone': { 'type': 'string' }\n ... },\n ... 'required': ['name', 'email']\n ... 
}\n >>> from jsonschema.validators import validate\n >>> validate({}, schema, NoRequiredValidator)\n\n returns ``None``, which means that the validation was successful,\n while\n\n >>> validate({}, schema) # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n ValidationError: 'name' is a required property\n ...\n\n raises a :class:`jsonschema.exceptions.ValidationError`.\n \"\"\"\n if '$schema' in self and self['$schema'] is not None:\n kwargs['cls'] = kwargs.pop('validator', None)\n _records_state.validate(self, self['$schema'], **kwargs)\n"
] |
class Record(RecordBase):
"""Define API for metadata creation and manipulation."""
@classmethod
def create(cls, data, id_=None, **kwargs):
r"""Create a new record instance and store it in the database.
#. Send a signal :data:`invenio_records.signals.before_record_insert`
with the new record as parameter.
#. Validate the new record data.
#. Add the new record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_insert`
with the new created record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:param data: Dict with the record metadata.
:param id_: Specify a UUID to use for the new record, instead of
automatically generated.
:returns: A new :class:`Record` instance.
"""
from .models import RecordMetadata
with db.session.begin_nested():
record = cls(data)
before_record_insert.send(
current_app._get_current_object(),
record=record
)
record.validate(**kwargs)
record.model = RecordMetadata(id=id_, json=record)
db.session.add(record.model)
after_record_insert.send(
current_app._get_current_object(),
record=record
)
return record
@classmethod
def get_record(cls, id_, with_deleted=False):
"""Retrieve the record by id.
Raise a database exception if the record does not exist.
:param id_: record ID.
:param with_deleted: If `True` then it includes deleted records.
:returns: The :class:`Record` instance.
"""
with db.session.no_autoflush:
query = RecordMetadata.query.filter_by(id=id_)
if not with_deleted:
query = query.filter(RecordMetadata.json != None) # noqa
obj = query.one()
return cls(obj.json, model=obj)
@classmethod
def get_records(cls, ids, with_deleted=False):
"""Retrieve multiple records by id.
:param ids: List of record IDs.
:param with_deleted: If `True` then it includes deleted records.
:returns: A list of :class:`Record` instances.
"""
with db.session.no_autoflush:
query = RecordMetadata.query.filter(RecordMetadata.id.in_(ids))
if not with_deleted:
query = query.filter(RecordMetadata.json != None) # noqa
return [cls(obj.json, model=obj) for obj in query.all()]
def patch(self, patch):
"""Patch record metadata.
:params patch: Dictionary of record metadata.
:returns: A new :class:`Record` instance.
"""
data = apply_patch(dict(self), patch)
return self.__class__(data, model=self.model)
def delete(self, force=False):
"""Delete a record.
If `force` is ``False``, the record is soft-deleted: record data will
be deleted but the record identifier and the history of the record will
be kept. This ensures that the same record identifier cannot be used
twice, and that you can still retrieve its history. If `force` is
``True``, then the record is completely deleted from the database.
#. Send a signal :data:`invenio_records.signals.before_record_delete`
with the current record as parameter.
#. Delete or soft-delete the current record.
#. Send a signal :data:`invenio_records.signals.after_record_delete`
with the current deleted record as parameter.
:param force: if ``True``, completely deletes the current record from
the database, otherwise soft-deletes it.
:returns: The deleted :class:`Record` instance.
"""
if self.model is None:
raise MissingModelError()
with db.session.begin_nested():
before_record_delete.send(
current_app._get_current_object(),
record=self
)
if force:
db.session.delete(self.model)
else:
self.model.json = None
db.session.merge(self.model)
after_record_delete.send(
current_app._get_current_object(),
record=self
)
return self
def revert(self, revision_id):
"""Revert the record to a specific revision.
#. Send a signal :data:`invenio_records.signals.before_record_revert`
with the current record as parameter.
#. Revert the record to the revision id passed as parameter.
#. Send a signal :data:`invenio_records.signals.after_record_revert`
with the reverted record as parameter.
:param revision_id: Specify the record revision id
:returns: The :class:`Record` instance corresponding to the revision id
"""
if self.model is None:
raise MissingModelError()
revision = self.revisions[revision_id]
with db.session.begin_nested():
before_record_revert.send(
current_app._get_current_object(),
record=self
)
self.model.json = dict(revision)
db.session.merge(self.model)
after_record_revert.send(
current_app._get_current_object(),
record=self
)
return self.__class__(self.model.json, model=self.model)
@property
def revisions(self):
"""Get revisions iterator."""
if self.model is None:
raise MissingModelError()
return RevisionsIterator(self.model)
|
inveniosoftware/invenio-records
|
invenio_records/api.py
|
Record.delete
|
python
|
def delete(self, force=False):
if self.model is None:
raise MissingModelError()
with db.session.begin_nested():
before_record_delete.send(
current_app._get_current_object(),
record=self
)
if force:
db.session.delete(self.model)
else:
self.model.json = None
db.session.merge(self.model)
after_record_delete.send(
current_app._get_current_object(),
record=self
)
return self
|
Delete a record.
If `force` is ``False``, the record is soft-deleted: record data will
be deleted but the record identifier and the history of the record will
be kept. This ensures that the same record identifier cannot be used
twice, and that you can still retrieve its history. If `force` is
``True``, then the record is completely deleted from the database.
#. Send a signal :data:`invenio_records.signals.before_record_delete`
with the current record as parameter.
#. Delete or soft-delete the current record.
#. Send a signal :data:`invenio_records.signals.after_record_delete`
with the current deleted record as parameter.
:param force: if ``True``, completely deletes the current record from
the database, otherwise soft-deletes it.
:returns: The deleted :class:`Record` instance.
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/api.py#L280-L320
| null |
class Record(RecordBase):
"""Define API for metadata creation and manipulation."""
@classmethod
def create(cls, data, id_=None, **kwargs):
r"""Create a new record instance and store it in the database.
#. Send a signal :data:`invenio_records.signals.before_record_insert`
with the new record as parameter.
#. Validate the new record data.
#. Add the new record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_insert`
with the new created record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:param data: Dict with the record metadata.
:param id_: Specify a UUID to use for the new record, instead of
automatically generated.
:returns: A new :class:`Record` instance.
"""
from .models import RecordMetadata
with db.session.begin_nested():
record = cls(data)
before_record_insert.send(
current_app._get_current_object(),
record=record
)
record.validate(**kwargs)
record.model = RecordMetadata(id=id_, json=record)
db.session.add(record.model)
after_record_insert.send(
current_app._get_current_object(),
record=record
)
return record
@classmethod
def get_record(cls, id_, with_deleted=False):
"""Retrieve the record by id.
Raise a database exception if the record does not exist.
:param id_: record ID.
:param with_deleted: If `True` then it includes deleted records.
:returns: The :class:`Record` instance.
"""
with db.session.no_autoflush:
query = RecordMetadata.query.filter_by(id=id_)
if not with_deleted:
query = query.filter(RecordMetadata.json != None) # noqa
obj = query.one()
return cls(obj.json, model=obj)
@classmethod
def get_records(cls, ids, with_deleted=False):
"""Retrieve multiple records by id.
:param ids: List of record IDs.
:param with_deleted: If `True` then it includes deleted records.
:returns: A list of :class:`Record` instances.
"""
with db.session.no_autoflush:
query = RecordMetadata.query.filter(RecordMetadata.id.in_(ids))
if not with_deleted:
query = query.filter(RecordMetadata.json != None) # noqa
return [cls(obj.json, model=obj) for obj in query.all()]
def patch(self, patch):
"""Patch record metadata.
:params patch: Dictionary of record metadata.
:returns: A new :class:`Record` instance.
"""
data = apply_patch(dict(self), patch)
return self.__class__(data, model=self.model)
def commit(self, **kwargs):
r"""Store changes of the current record instance in the database.
#. Send a signal :data:`invenio_records.signals.before_record_update`
with the current record to be committed as parameter.
#. Validate the current record data.
#. Commit the current record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_update`
with the committed record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:returns: The :class:`Record` instance.
"""
if self.model is None or self.model.json is None:
raise MissingModelError()
with db.session.begin_nested():
before_record_update.send(
current_app._get_current_object(),
record=self
)
self.validate(**kwargs)
self.model.json = dict(self)
flag_modified(self.model, 'json')
db.session.merge(self.model)
after_record_update.send(
current_app._get_current_object(),
record=self
)
return self
def revert(self, revision_id):
"""Revert the record to a specific revision.
#. Send a signal :data:`invenio_records.signals.before_record_revert`
with the current record as parameter.
#. Revert the record to the revision id passed as parameter.
#. Send a signal :data:`invenio_records.signals.after_record_revert`
with the reverted record as parameter.
:param revision_id: Specify the record revision id
:returns: The :class:`Record` instance corresponding to the revision id
"""
if self.model is None:
raise MissingModelError()
revision = self.revisions[revision_id]
with db.session.begin_nested():
before_record_revert.send(
current_app._get_current_object(),
record=self
)
self.model.json = dict(revision)
db.session.merge(self.model)
after_record_revert.send(
current_app._get_current_object(),
record=self
)
return self.__class__(self.model.json, model=self.model)
@property
def revisions(self):
"""Get revisions iterator."""
if self.model is None:
raise MissingModelError()
return RevisionsIterator(self.model)
|
inveniosoftware/invenio-records
|
invenio_records/api.py
|
Record.revert
|
python
|
def revert(self, revision_id):
if self.model is None:
raise MissingModelError()
revision = self.revisions[revision_id]
with db.session.begin_nested():
before_record_revert.send(
current_app._get_current_object(),
record=self
)
self.model.json = dict(revision)
db.session.merge(self.model)
after_record_revert.send(
current_app._get_current_object(),
record=self
)
return self.__class__(self.model.json, model=self.model)
|
Revert the record to a specific revision.
#. Send a signal :data:`invenio_records.signals.before_record_revert`
with the current record as parameter.
#. Revert the record to the revision id passed as parameter.
#. Send a signal :data:`invenio_records.signals.after_record_revert`
with the reverted record as parameter.
:param revision_id: Specify the record revision id
:returns: The :class:`Record` instance corresponding to the revision id
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/api.py#L322-L355
| null |
class Record(RecordBase):
"""Define API for metadata creation and manipulation."""
@classmethod
def create(cls, data, id_=None, **kwargs):
r"""Create a new record instance and store it in the database.
#. Send a signal :data:`invenio_records.signals.before_record_insert`
with the new record as parameter.
#. Validate the new record data.
#. Add the new record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_insert`
with the new created record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:param data: Dict with the record metadata.
:param id_: Specify a UUID to use for the new record, instead of
automatically generated.
:returns: A new :class:`Record` instance.
"""
from .models import RecordMetadata
with db.session.begin_nested():
record = cls(data)
before_record_insert.send(
current_app._get_current_object(),
record=record
)
record.validate(**kwargs)
record.model = RecordMetadata(id=id_, json=record)
db.session.add(record.model)
after_record_insert.send(
current_app._get_current_object(),
record=record
)
return record
@classmethod
def get_record(cls, id_, with_deleted=False):
"""Retrieve the record by id.
Raise a database exception if the record does not exist.
:param id_: record ID.
:param with_deleted: If `True` then it includes deleted records.
:returns: The :class:`Record` instance.
"""
with db.session.no_autoflush:
query = RecordMetadata.query.filter_by(id=id_)
if not with_deleted:
query = query.filter(RecordMetadata.json != None) # noqa
obj = query.one()
return cls(obj.json, model=obj)
@classmethod
def get_records(cls, ids, with_deleted=False):
"""Retrieve multiple records by id.
:param ids: List of record IDs.
:param with_deleted: If `True` then it includes deleted records.
:returns: A list of :class:`Record` instances.
"""
with db.session.no_autoflush:
query = RecordMetadata.query.filter(RecordMetadata.id.in_(ids))
if not with_deleted:
query = query.filter(RecordMetadata.json != None) # noqa
return [cls(obj.json, model=obj) for obj in query.all()]
def patch(self, patch):
"""Patch record metadata.
:params patch: Dictionary of record metadata.
:returns: A new :class:`Record` instance.
"""
data = apply_patch(dict(self), patch)
return self.__class__(data, model=self.model)
def commit(self, **kwargs):
r"""Store changes of the current record instance in the database.
#. Send a signal :data:`invenio_records.signals.before_record_update`
with the current record to be committed as parameter.
#. Validate the current record data.
#. Commit the current record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_update`
with the committed record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:returns: The :class:`Record` instance.
"""
if self.model is None or self.model.json is None:
raise MissingModelError()
with db.session.begin_nested():
before_record_update.send(
current_app._get_current_object(),
record=self
)
self.validate(**kwargs)
self.model.json = dict(self)
flag_modified(self.model, 'json')
db.session.merge(self.model)
after_record_update.send(
current_app._get_current_object(),
record=self
)
return self
def delete(self, force=False):
"""Delete a record.
If `force` is ``False``, the record is soft-deleted: record data will
be deleted but the record identifier and the history of the record will
be kept. This ensures that the same record identifier cannot be used
twice, and that you can still retrieve its history. If `force` is
``True``, then the record is completely deleted from the database.
#. Send a signal :data:`invenio_records.signals.before_record_delete`
with the current record as parameter.
#. Delete or soft-delete the current record.
#. Send a signal :data:`invenio_records.signals.after_record_delete`
with the current deleted record as parameter.
:param force: if ``True``, completely deletes the current record from
the database, otherwise soft-deletes it.
:returns: The deleted :class:`Record` instance.
"""
if self.model is None:
raise MissingModelError()
with db.session.begin_nested():
before_record_delete.send(
current_app._get_current_object(),
record=self
)
if force:
db.session.delete(self.model)
else:
self.model.json = None
db.session.merge(self.model)
after_record_delete.send(
current_app._get_current_object(),
record=self
)
return self
@property
def revisions(self):
"""Get revisions iterator."""
if self.model is None:
raise MissingModelError()
return RevisionsIterator(self.model)
|
inveniosoftware/invenio-records
|
invenio_records/cli.py
|
create
|
python
|
def create(source, ids, force, pid_minter=None):
records_deprecation_warning()
# Make sure that all imports are done with application context.
from .api import Record
from .models import RecordMetadata
pid_minter = [process_minter(minter) for minter in pid_minter or []]
data = json.load(source)
if isinstance(data, dict):
data = [data]
if ids:
assert len(ids) == len(data), 'Not enough identifiers.'
for record, id_ in zip_longest(data, ids):
id_ = id_ or uuid.uuid4()
try:
for minter in pid_minter:
minter(id_, record)
click.echo(Record.create(record, id_=id_).id)
except exc.IntegrityError:
if force:
current_app.logger.warning(
"Trying to force insert: {0}".format(id_))
# IMPORTANT: We need to create new transaction for
# SQLAlchemy-Continuum as we are using no auto-flush
# in Record.get_record.
vm = current_app.extensions['invenio-db'].versioning_manager
uow = vm.unit_of_work(db.session)
uow.create_transaction(db.session)
# Use low-level database model to retrieve an instance.
model = RecordMetadata.query.get(id_)
rec = Record(record, model=model).commit()
current_app.logger.info("Created new revision {0}".format(
rec.revision_id))
click.echo(rec.id)
else:
raise click.BadParameter(
'Record with id={0} already exists. If you want to '
'override its data use --force.'.format(id_),
param_hint='ids',
)
db.session.flush()
db.session.commit()
|
Create new bibliographic record(s).
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/cli.py#L82-L130
|
[
"def records_deprecation_warning():\n \"\"\"Add deprecation warning for records cli.\"\"\"\n warnings.warn('The Invenio-Records cli module is deprecated.',\n PendingDeprecationWarning)\n",
"def create(cls, data, id_=None, **kwargs):\n r\"\"\"Create a new record instance and store it in the database.\n\n #. Send a signal :data:`invenio_records.signals.before_record_insert`\n with the new record as parameter.\n\n #. Validate the new record data.\n\n #. Add the new record in the database.\n\n #. Send a signal :data:`invenio_records.signals.after_record_insert`\n with the new created record as parameter.\n\n :Keyword Arguments:\n * **format_checker** --\n An instance of the class :class:`jsonschema.FormatChecker`, which\n contains validation rules for formats. See\n :func:`~invenio_records.api.RecordBase.validate` for more details.\n\n * **validator** --\n A :class:`jsonschema.IValidator` class that will be used to\n validate the record. See\n :func:`~invenio_records.api.RecordBase.validate` for more details.\n\n :param data: Dict with the record metadata.\n :param id_: Specify a UUID to use for the new record, instead of\n automatically generated.\n :returns: A new :class:`Record` instance.\n \"\"\"\n from .models import RecordMetadata\n with db.session.begin_nested():\n record = cls(data)\n\n before_record_insert.send(\n current_app._get_current_object(),\n record=record\n )\n\n record.validate(**kwargs)\n\n record.model = RecordMetadata(id=id_, json=record)\n\n db.session.add(record.model)\n\n after_record_insert.send(\n current_app._get_current_object(),\n record=record\n )\n return record\n",
"def commit(self, **kwargs):\n r\"\"\"Store changes of the current record instance in the database.\n\n #. Send a signal :data:`invenio_records.signals.before_record_update`\n with the current record to be committed as parameter.\n\n #. Validate the current record data.\n\n #. Commit the current record in the database.\n\n #. Send a signal :data:`invenio_records.signals.after_record_update`\n with the committed record as parameter.\n\n :Keyword Arguments:\n * **format_checker** --\n An instance of the class :class:`jsonschema.FormatChecker`, which\n contains validation rules for formats. See\n :func:`~invenio_records.api.RecordBase.validate` for more details.\n\n * **validator** --\n A :class:`jsonschema.IValidator` class that will be used to\n validate the record. See\n :func:`~invenio_records.api.RecordBase.validate` for more details.\n\n :returns: The :class:`Record` instance.\n \"\"\"\n if self.model is None or self.model.json is None:\n raise MissingModelError()\n\n with db.session.begin_nested():\n before_record_update.send(\n current_app._get_current_object(),\n record=self\n )\n\n self.validate(**kwargs)\n\n self.model.json = dict(self)\n flag_modified(self.model, 'json')\n\n db.session.merge(self.model)\n\n after_record_update.send(\n current_app._get_current_object(),\n record=self\n )\n return self\n"
] |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Click command-line interface for record management."""
from __future__ import absolute_import, print_function
import json
import sys
import uuid
import warnings
import click
import pkg_resources
from flask import current_app
from flask.cli import with_appcontext
from invenio_db import db
from sqlalchemy import exc
def records_deprecation_warning():
"""Add deprecation warning for records cli."""
warnings.warn('The Invenio-Records cli module is deprecated.',
PendingDeprecationWarning)
try:
pkg_resources.get_distribution('invenio_pidstore')
except pkg_resources.DistributionNotFound:
HAS_PIDSTORE = False
else:
HAS_PIDSTORE = True
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
if HAS_PIDSTORE:
def process_minter(value):
"""Load minter from InvenioPIDStore registry based on given value."""
from invenio_pidstore import current_pidstore
if 'invenio-pidstore' not in current_app.extensions:
raise click.ClickException(
'Invenio-PIDStore has not been initialized.'
)
try:
return current_pidstore.minters[value]
except KeyError:
raise click.BadParameter(
'Unknown minter: {0}. Please choose one minter between [{1}].'
.format(value, ', '.join(current_pidstore.minters.keys()))
)
option_pid_minter = click.option('--pid-minter', multiple=True,
default=None)
else:
def option_pid_minter(_):
"""Empty option."""
return _
@click.group()
def records():
"""Records management."""
@records.command()
@click.argument('source', type=click.File('r'), default=sys.stdin)
@click.option('-i', '--id', 'ids', multiple=True)
@click.option('--force', is_flag=True, default=False)
@option_pid_minter
@with_appcontext
@records.command()
@click.argument('patch', type=click.File('r'), default=sys.stdin)
@click.option('-i', '--id', 'ids', multiple=True)
@with_appcontext
def patch(patch, ids):
"""Patch existing bibliographic record."""
records_deprecation_warning()
from .api import Record
patch_content = patch.read()
if ids:
for id_ in ids:
rec = Record.get_record(id_).patch(patch_content).commit()
current_app.logger.info("Created new revision {0}".format(
rec.revision_id))
click.echo(rec.id)
db.session.commit()
@records.command()
@click.option('-i', '--id', 'ids', multiple=True)
@click.option('--force', is_flag=True, default=False)
@with_appcontext
def delete(ids, force):
"""Delete bibliographic record(s)."""
records_deprecation_warning()
from .api import Record
for id_ in ids:
record = Record.get_record(id_, with_deleted=force)
record.delete(force=force)
db.session.commit()
|
inveniosoftware/invenio-records
|
invenio_records/cli.py
|
patch
|
python
|
def patch(patch, ids):
records_deprecation_warning()
from .api import Record
patch_content = patch.read()
if ids:
for id_ in ids:
rec = Record.get_record(id_).patch(patch_content).commit()
current_app.logger.info("Created new revision {0}".format(
rec.revision_id))
click.echo(rec.id)
db.session.commit()
|
Patch existing bibliographic record.
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/cli.py#L137-L151
|
[
"def records_deprecation_warning():\n \"\"\"Add deprecation warning for records cli.\"\"\"\n warnings.warn('The Invenio-Records cli module is deprecated.',\n PendingDeprecationWarning)\n",
"def get_record(cls, id_, with_deleted=False):\n \"\"\"Retrieve the record by id.\n\n Raise a database exception if the record does not exist.\n\n :param id_: record ID.\n :param with_deleted: If `True` then it includes deleted records.\n :returns: The :class:`Record` instance.\n \"\"\"\n with db.session.no_autoflush:\n query = RecordMetadata.query.filter_by(id=id_)\n if not with_deleted:\n query = query.filter(RecordMetadata.json != None) # noqa\n obj = query.one()\n return cls(obj.json, model=obj)\n",
"def patch(self, patch):\n \"\"\"Patch record metadata.\n\n :params patch: Dictionary of record metadata.\n :returns: A new :class:`Record` instance.\n \"\"\"\n data = apply_patch(dict(self), patch)\n return self.__class__(data, model=self.model)\n"
] |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Click command-line interface for record management."""
from __future__ import absolute_import, print_function
import json
import sys
import uuid
import warnings
import click
import pkg_resources
from flask import current_app
from flask.cli import with_appcontext
from invenio_db import db
from sqlalchemy import exc
def records_deprecation_warning():
"""Add deprecation warning for records cli."""
warnings.warn('The Invenio-Records cli module is deprecated.',
PendingDeprecationWarning)
try:
pkg_resources.get_distribution('invenio_pidstore')
except pkg_resources.DistributionNotFound:
HAS_PIDSTORE = False
else:
HAS_PIDSTORE = True
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
if HAS_PIDSTORE:
def process_minter(value):
"""Load minter from InvenioPIDStore registry based on given value."""
from invenio_pidstore import current_pidstore
if 'invenio-pidstore' not in current_app.extensions:
raise click.ClickException(
'Invenio-PIDStore has not been initialized.'
)
try:
return current_pidstore.minters[value]
except KeyError:
raise click.BadParameter(
'Unknown minter: {0}. Please choose one minter between [{1}].'
.format(value, ', '.join(current_pidstore.minters.keys()))
)
option_pid_minter = click.option('--pid-minter', multiple=True,
default=None)
else:
def option_pid_minter(_):
"""Empty option."""
return _
@click.group()
def records():
"""Records management."""
@records.command()
@click.argument('source', type=click.File('r'), default=sys.stdin)
@click.option('-i', '--id', 'ids', multiple=True)
@click.option('--force', is_flag=True, default=False)
@option_pid_minter
@with_appcontext
def create(source, ids, force, pid_minter=None):
"""Create new bibliographic record(s)."""
records_deprecation_warning()
# Make sure that all imports are done with application context.
from .api import Record
from .models import RecordMetadata
pid_minter = [process_minter(minter) for minter in pid_minter or []]
data = json.load(source)
if isinstance(data, dict):
data = [data]
if ids:
assert len(ids) == len(data), 'Not enough identifiers.'
for record, id_ in zip_longest(data, ids):
id_ = id_ or uuid.uuid4()
try:
for minter in pid_minter:
minter(id_, record)
click.echo(Record.create(record, id_=id_).id)
except exc.IntegrityError:
if force:
current_app.logger.warning(
"Trying to force insert: {0}".format(id_))
# IMPORTANT: We need to create new transaction for
# SQLAlchemy-Continuum as we are using no auto-flush
# in Record.get_record.
vm = current_app.extensions['invenio-db'].versioning_manager
uow = vm.unit_of_work(db.session)
uow.create_transaction(db.session)
# Use low-level database model to retrieve an instance.
model = RecordMetadata.query.get(id_)
rec = Record(record, model=model).commit()
current_app.logger.info("Created new revision {0}".format(
rec.revision_id))
click.echo(rec.id)
else:
raise click.BadParameter(
'Record with id={0} already exists. If you want to '
'override its data use --force.'.format(id_),
param_hint='ids',
)
db.session.flush()
db.session.commit()
@records.command()
@click.argument('patch', type=click.File('r'), default=sys.stdin)
@click.option('-i', '--id', 'ids', multiple=True)
@with_appcontext
@records.command()
@click.option('-i', '--id', 'ids', multiple=True)
@click.option('--force', is_flag=True, default=False)
@with_appcontext
def delete(ids, force):
"""Delete bibliographic record(s)."""
records_deprecation_warning()
from .api import Record
for id_ in ids:
record = Record.get_record(id_, with_deleted=force)
record.delete(force=force)
db.session.commit()
|
inveniosoftware/invenio-records
|
invenio_records/cli.py
|
delete
|
python
|
def delete(ids, force):
records_deprecation_warning()
from .api import Record
for id_ in ids:
record = Record.get_record(id_, with_deleted=force)
record.delete(force=force)
db.session.commit()
|
Delete bibliographic record(s).
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/cli.py#L158-L166
|
[
"def records_deprecation_warning():\n \"\"\"Add deprecation warning for records cli.\"\"\"\n warnings.warn('The Invenio-Records cli module is deprecated.',\n PendingDeprecationWarning)\n",
"def get_record(cls, id_, with_deleted=False):\n \"\"\"Retrieve the record by id.\n\n Raise a database exception if the record does not exist.\n\n :param id_: record ID.\n :param with_deleted: If `True` then it includes deleted records.\n :returns: The :class:`Record` instance.\n \"\"\"\n with db.session.no_autoflush:\n query = RecordMetadata.query.filter_by(id=id_)\n if not with_deleted:\n query = query.filter(RecordMetadata.json != None) # noqa\n obj = query.one()\n return cls(obj.json, model=obj)\n",
"def delete(self, force=False):\n \"\"\"Delete a record.\n\n If `force` is ``False``, the record is soft-deleted: record data will\n be deleted but the record identifier and the history of the record will\n be kept. This ensures that the same record identifier cannot be used\n twice, and that you can still retrieve its history. If `force` is\n ``True``, then the record is completely deleted from the database.\n\n #. Send a signal :data:`invenio_records.signals.before_record_delete`\n with the current record as parameter.\n\n #. Delete or soft-delete the current record.\n\n #. Send a signal :data:`invenio_records.signals.after_record_delete`\n with the current deleted record as parameter.\n\n :param force: if ``True``, completely deletes the current record from\n the database, otherwise soft-deletes it.\n :returns: The deleted :class:`Record` instance.\n \"\"\"\n if self.model is None:\n raise MissingModelError()\n\n with db.session.begin_nested():\n before_record_delete.send(\n current_app._get_current_object(),\n record=self\n )\n\n if force:\n db.session.delete(self.model)\n else:\n self.model.json = None\n db.session.merge(self.model)\n\n after_record_delete.send(\n current_app._get_current_object(),\n record=self\n )\n return self\n"
] |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Click command-line interface for record management."""
from __future__ import absolute_import, print_function
import json
import sys
import uuid
import warnings
import click
import pkg_resources
from flask import current_app
from flask.cli import with_appcontext
from invenio_db import db
from sqlalchemy import exc
def records_deprecation_warning():
"""Add deprecation warning for records cli."""
warnings.warn('The Invenio-Records cli module is deprecated.',
PendingDeprecationWarning)
try:
pkg_resources.get_distribution('invenio_pidstore')
except pkg_resources.DistributionNotFound:
HAS_PIDSTORE = False
else:
HAS_PIDSTORE = True
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
if HAS_PIDSTORE:
def process_minter(value):
"""Load minter from InvenioPIDStore registry based on given value."""
from invenio_pidstore import current_pidstore
if 'invenio-pidstore' not in current_app.extensions:
raise click.ClickException(
'Invenio-PIDStore has not been initialized.'
)
try:
return current_pidstore.minters[value]
except KeyError:
raise click.BadParameter(
'Unknown minter: {0}. Please choose one minter between [{1}].'
.format(value, ', '.join(current_pidstore.minters.keys()))
)
option_pid_minter = click.option('--pid-minter', multiple=True,
default=None)
else:
def option_pid_minter(_):
"""Empty option."""
return _
@click.group()
def records():
"""Records management."""
@records.command()
@click.argument('source', type=click.File('r'), default=sys.stdin)
@click.option('-i', '--id', 'ids', multiple=True)
@click.option('--force', is_flag=True, default=False)
@option_pid_minter
@with_appcontext
def create(source, ids, force, pid_minter=None):
"""Create new bibliographic record(s)."""
records_deprecation_warning()
# Make sure that all imports are done with application context.
from .api import Record
from .models import RecordMetadata
pid_minter = [process_minter(minter) for minter in pid_minter or []]
data = json.load(source)
if isinstance(data, dict):
data = [data]
if ids:
assert len(ids) == len(data), 'Not enough identifiers.'
for record, id_ in zip_longest(data, ids):
id_ = id_ or uuid.uuid4()
try:
for minter in pid_minter:
minter(id_, record)
click.echo(Record.create(record, id_=id_).id)
except exc.IntegrityError:
if force:
current_app.logger.warning(
"Trying to force insert: {0}".format(id_))
# IMPORTANT: We need to create new transaction for
# SQLAlchemy-Continuum as we are using no auto-flush
# in Record.get_record.
vm = current_app.extensions['invenio-db'].versioning_manager
uow = vm.unit_of_work(db.session)
uow.create_transaction(db.session)
# Use low-level database model to retrieve an instance.
model = RecordMetadata.query.get(id_)
rec = Record(record, model=model).commit()
current_app.logger.info("Created new revision {0}".format(
rec.revision_id))
click.echo(rec.id)
else:
raise click.BadParameter(
'Record with id={0} already exists. If you want to '
'override its data use --force.'.format(id_),
param_hint='ids',
)
db.session.flush()
db.session.commit()
@records.command()
@click.argument('patch', type=click.File('r'), default=sys.stdin)
@click.option('-i', '--id', 'ids', multiple=True)
@with_appcontext
def patch(patch, ids):
"""Patch existing bibliographic record."""
records_deprecation_warning()
from .api import Record
patch_content = patch.read()
if ids:
for id_ in ids:
rec = Record.get_record(id_).patch(patch_content).commit()
current_app.logger.info("Created new revision {0}".format(
rec.revision_id))
click.echo(rec.id)
db.session.commit()
@records.command()
@click.option('-i', '--id', 'ids', multiple=True)
@click.option('--force', is_flag=True, default=False)
@with_appcontext
|
inveniosoftware/invenio-records
|
invenio_records/alembic/862037093962_create_records_tables.py
|
upgrade
|
python
|
def upgrade():
op.create_table(
'records_metadata',
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('updated', sa.DateTime(), nullable=False),
sa.Column(
'id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
sa.Column('json', sqlalchemy_utils.JSONType().with_variant(
sa.dialects.postgresql.JSON(
none_as_null=True), 'postgresql',
), nullable=True),
sa.Column('version_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'records_metadata_version',
sa.Column('created', sa.DateTime(),
autoincrement=False, nullable=True),
sa.Column('updated', sa.DateTime(),
autoincrement=False, nullable=True),
sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(),
autoincrement=False, nullable=False),
sa.Column('json', sqlalchemy_utils.JSONType().with_variant(
sa.dialects.postgresql.JSON(
none_as_null=True), 'postgresql',
), autoincrement=False, nullable=True),
sa.Column('version_id', sa.Integer(),
autoincrement=False, nullable=True),
sa.Column('transaction_id', sa.BigInteger(),
autoincrement=False, nullable=False),
sa.Column('end_transaction_id',
sa.BigInteger(), nullable=True),
sa.Column('operation_type',
sa.SmallInteger(), nullable=False),
sa.PrimaryKeyConstraint('id', 'transaction_id')
)
op.create_index(
op.f('ix_records_metadata_version_end_transaction_id'),
'records_metadata_version', ['end_transaction_id'], unique=False
)
op.create_index(
op.f('ix_records_metadata_version_operation_type'),
'records_metadata_version', ['operation_type'], unique=False
)
op.create_index(
op.f('ix_records_metadata_version_transaction_id'),
'records_metadata_version', ['transaction_id'], unique=False
)
|
Upgrade database.
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/alembic/862037093962_create_records_tables.py#L22-L70
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Create records tables."""
import sqlalchemy as sa
import sqlalchemy_utils
from alembic import op
# revision identifiers, used by Alembic.
revision = '862037093962'
down_revision = '1095cdf9f350'
branch_labels = ()
depends_on = None
def downgrade():
"""Downgrade database."""
op.drop_index(op.f('ix_records_metadata_version_transaction_id'),
table_name='records_metadata_version')
op.drop_index(op.f('ix_records_metadata_version_operation_type'),
table_name='records_metadata_version')
op.drop_index(op.f('ix_records_metadata_version_end_transaction_id'),
table_name='records_metadata_version')
op.drop_table('records_metadata_version')
op.drop_table('records_metadata')
|
inveniosoftware/invenio-records
|
invenio_records/ext.py
|
_RecordsState.validate
|
python
|
def validate(self, data, schema, **kwargs):
if not isinstance(schema, dict):
schema = {'$ref': schema}
return validate(
data,
schema,
resolver=self.ref_resolver_cls.from_schema(schema),
types=self.app.config.get('RECORDS_VALIDATION_TYPES', {}),
**kwargs
)
|
Validate data using schema with ``JSONResolver``.
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/ext.py#L32-L42
| null |
class _RecordsState(object):
"""State for record JSON resolver."""
def __init__(self, app, entry_point_group=None):
"""Initialize state."""
self.app = app
self.resolver = JSONResolver(entry_point_group=entry_point_group)
self.ref_resolver_cls = ref_resolver_factory(self.resolver)
self.loader_cls = json_loader_factory(self.resolver)
def replace_refs(self, data):
"""Replace the JSON reference objects with ``JsonRef``."""
return JsonRef.replace_refs(data, loader=self.loader_cls())
|
inveniosoftware/invenio-records
|
invenio_records/admin.py
|
RecordMetadataModelView.delete_model
|
python
|
def delete_model(self, model):
try:
if model.json is None:
return True
record = Record(model.json, model=model)
record.delete()
db.session.commit()
except SQLAlchemyError as e:
if not self.handle_view_exception(e):
flash(_('Failed to delete record. %(error)s', error=str(e)),
category='error')
db.session.rollback()
return False
return True
|
Delete a record.
|
train
|
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/admin.py#L49-L63
|
[
"def delete(self, force=False):\n \"\"\"Delete a record.\n\n If `force` is ``False``, the record is soft-deleted: record data will\n be deleted but the record identifier and the history of the record will\n be kept. This ensures that the same record identifier cannot be used\n twice, and that you can still retrieve its history. If `force` is\n ``True``, then the record is completely deleted from the database.\n\n #. Send a signal :data:`invenio_records.signals.before_record_delete`\n with the current record as parameter.\n\n #. Delete or soft-delete the current record.\n\n #. Send a signal :data:`invenio_records.signals.after_record_delete`\n with the current deleted record as parameter.\n\n :param force: if ``True``, completely deletes the current record from\n the database, otherwise soft-deletes it.\n :returns: The deleted :class:`Record` instance.\n \"\"\"\n if self.model is None:\n raise MissingModelError()\n\n with db.session.begin_nested():\n before_record_delete.send(\n current_app._get_current_object(),\n record=self\n )\n\n if force:\n db.session.delete(self.model)\n else:\n self.model.json = None\n db.session.merge(self.model)\n\n after_record_delete.send(\n current_app._get_current_object(),\n record=self\n )\n return self\n"
] |
class RecordMetadataModelView(ModelView):
    """Records admin model view."""
    # Converter that turns model columns into Flask-Admin filter widgets.
    filter_converter = FilterConverter()
    # Records are created and edited through the API; the admin UI only
    # lists, inspects and deletes them.
    can_create = False
    can_edit = False
    can_delete = True
    can_view_details = True
    # Columns shown in the list view; the details view additionally
    # exposes the raw JSON payload.
    column_list = ('id', 'version_id', 'updated', 'created',)
    column_details_list = ('id', 'version_id', 'updated', 'created', 'json')
    column_labels = dict(
        id=_('UUID'),
        version_id=_('Revision'),
        json=_('JSON'),
    )
    column_formatters = dict(
        # Show a zero-based revision number (version_id is 1-based --
        # presumably SQLAlchemy's version counter; confirm).
        version_id=lambda v, c, m, p: m.version_id-1,
        # Pretty-print the record JSON inside a <pre> block.
        json=lambda v, c, m, p: Markup("<pre>{0}</pre>".format(
            json.dumps(m.json, indent=2, sort_keys=True)))
    )
    column_filters = ('created', 'updated', )
    column_default_sort = ('updated', True)
    # Number of records per page in the list view.
    page_size = 25
|
shinux/PyTime
|
pytime/filter.py
|
BaseParser._str_parser
|
python
|
def _str_parser(string):
    """Pick a parse method based on the shape/length of *string*.

    :param string: raw input string
    :return: the parser callable to apply
    """
    if any(ch.isalpha() for ch in string):
        # Letters mean a named-month date or a diff expression.
        return BaseParser.__parse_not_only_str
    trimmed = string[:19]
    size = len(trimmed)
    if size > 10:
        return BaseParser.parse_datetime
    if 6 <= size <= 10:
        # A colon distinguishes a time ("23:23:23") from a date.
        return BaseParser.parse_time if ':' in trimmed else BaseParser.parse_date
    if size < 6:
        return BaseParser.parse_time
    return BaseParser.parse_special
|
return method by the length of string
:param string: string
:return: method
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/filter.py#L86-L107
| null |
class BaseParser(object):
"""Parse string to regular datetime/date type
1990-10-28 23:23:23 - 19
90-10-28 23:23:23 - 17
1990-10-28 - 10
28-10-1990 - 10
1990/10/28 - 10
28/10/1990 - 10
1990.10.28 - 10
28.10.1990 - 10
10-28-90 USA - 8
28-10-90 on hold - 8
10/28/90 USA - 8
28/10/90 on hold - 8
19901028 - 8
90-10-28 - 8
90/10/28 - 8
23:23:23 - 8
23:23 - 5
23:2 - 4
5:14 - 4
5:2 - 3
"""
def __init__(self, *args, **kwargs):
pass
@staticmethod
@staticmethod
def __parse_not_only_str(string):
baseParser_functions_to_try = [BaseParser.from_str, BaseParser.parse_diff]
raised_exception = None
for baseParser_function in baseParser_functions_to_try:
try:
return baseParser_function(string)
except Exception as e:
raised_exception = e
if raised_exception:
raise raised_exception
@staticmethod
def _datetime_parser(value):
return value
@staticmethod
def _timestamp_parser(value):
return datetime.datetime.fromtimestamp(value)
@staticmethod
def _special_parser(value):
return value
def _main_parser(func):
def wrapper(cls, *args, **kwargs):
value = args[0]
if isinstance(value, str_tuple):
method = BaseParser._str_parser(value)
elif isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
method = BaseParser._datetime_parser
elif isinstance(value, (int, float)):
method = BaseParser._timestamp_parser
else:
method = BaseParser._special_parser
if hasattr(method, '__call__'):
return method(value)
else:
raise UnexpectedTypeError(
'can not generate method for {value} type:{type}'.format(value=value, type=type(value)))
return wrapper
@classmethod
@_main_parser
def main(cls, value):
"""parse all type value"""
@staticmethod
def parse_datetime(string, formation=None):
if formation:
_stamp = datetime.datetime.strptime(string, formation)
elif len(string) >= 18:
_stamp = datetime.datetime.strptime(string, '%Y-%m-%d %H:%M:%S')
elif len(string) < 18:
if '-' in string:
_stamp = datetime.datetime.strptime(string, '%y-%m-%d %H:%M:%S')
else:
try:
_stamp = datetime.datetime.strptime(string, '%Y%m%d %H:%M:%S')
except ValueError:
_stamp = datetime.datetime.strptime(string, '%y%m%d %H:%M:%S')
else:
raise CanNotFormatError('Need %Y-%m-%d %H:%M:%S or %y-%m-%d %H:%M:%S')
return _stamp
@staticmethod
def parse_date(string, formation=None):
"""
string to date stamp
:param string: date string
:param formation: format string
:return: datetime.date
"""
if formation:
_stamp = datetime.datetime.strptime(string, formation).date()
return _stamp
_string = string.replace('.', '-').replace('/', '-')
if '-' in _string:
if len(_string.split('-')[0]) > 3 or len(_string.split('-')[2]) > 3:
try:
_stamp = datetime.datetime.strptime(_string, '%Y-%m-%d').date()
except ValueError:
try:
_stamp = datetime.datetime.strptime(_string, '%m-%d-%Y').date()
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%d-%m-%Y').date()
else:
try:
_stamp = datetime.datetime.strptime(_string, '%y-%m-%d').date()
except ValueError:
try:
_stamp = datetime.datetime.strptime(_string, '%m-%d-%y').date()
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%d-%m-%y').date()
else:
if len(_string) > 6:
try:
_stamp = datetime.datetime.strptime(_string, '%Y%m%d').date()
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%m%d%Y').date()
elif len(_string) <= 6:
try:
_stamp = datetime.datetime.strptime(_string, '%y%m%d').date()
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%m%d%y').date()
else:
raise CanNotFormatError
return _stamp
@staticmethod
def parse_time(string):
pass
@staticmethod
def parse_special(string):
pass
@staticmethod
def parse_diff(base_str):
"""
parse string to regular timedelta
:param base_str: str
:return: dict
"""
temp_dict = {'years': 0,
'months': 0,
'weeks': 0,
'days': 0,
'hours': 0,
'minutes': 0,
'seconds': 0}
_pure_str = re.findall("[a-zA-Z]+", base_str)
pure_num = [int(_) for _ in re.findall(r'\d+', base_str)]
pure_str = [filter_unit(_) for _ in _pure_str]
result_dict = dict(chain(temp_dict.items(), dict(zip(pure_str, pure_num)).items()))
if result_dict['months'] >= 12:
advance = result_dict['months'] // 12
remain = result_dict['months'] % 12
result_dict['years'] += advance
result_dict['months'] = remain
if result_dict['weeks']:
result_dict['days'] += result_dict['weeks'] * 7
return result_dict
@staticmethod
def from_str(date):
"""
Given a date in the format: Jan,21st.2015
will return a datetime of it.
"""
month = date[:3][0] + date[:3][-2:].lower()
if month not in NAMED_MONTHS:
raise CanNotFormatError('Month not recognized')
date = date.replace(',', '').replace(' ', '').replace('.', '')
try:
day_unit = [x for x in ['st', 'rd', 'nd', 'th'] if x in date][0]
day = int(re.search(r'\d+', date.split(day_unit)[0]).group())
year = int(re.search(r'\d+', date.split(day_unit)[1]).group())
numeric_month = NAMED_MONTHS[month]
return datetime.date(int(year), numeric_month, day)
except:
raise CanNotFormatError('Not well formatted. Expecting something like May,21st.2015')
|
shinux/PyTime
|
pytime/filter.py
|
BaseParser.parse_date
|
python
|
def parse_date(string, formation=None):
    """
    string to date stamp
    :param string: date string
    :param formation: format string
    :return: datetime.date
    """
    if formation:
        return datetime.datetime.strptime(string, formation).date()
    # Normalize all supported separators to a dash.
    normalized = string.replace('.', '-').replace('/', '-')

    def _first_match(text, formats):
        # Try each format in order; re-raise the last failure.
        last_error = None
        for fmt in formats:
            try:
                return datetime.datetime.strptime(text, fmt).date()
            except ValueError as err:
                last_error = err
        raise last_error

    if '-' in normalized:
        pieces = normalized.split('-')
        # A 4-digit first or last segment signals a full (4-digit) year.
        if len(pieces[0]) > 3 or len(pieces[2]) > 3:
            return _first_match(normalized, ('%Y-%m-%d', '%m-%d-%Y', '%d-%m-%Y'))
        return _first_match(normalized, ('%y-%m-%d', '%m-%d-%y', '%d-%m-%y'))
    # Undelimited digit strings: length decides 4- vs 2-digit year.
    if len(normalized) > 6:
        return _first_match(normalized, ('%Y%m%d', '%m%d%Y'))
    return _first_match(normalized, ('%y%m%d', '%m%d%y'))
|
string to date stamp
:param string: date string
:param formation: format string
:return: datetime.date
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/filter.py#L178-L220
| null |
class BaseParser(object):
"""Parse string to regular datetime/date type
1990-10-28 23:23:23 - 19
90-10-28 23:23:23 - 17
1990-10-28 - 10
28-10-1990 - 10
1990/10/28 - 10
28/10/1990 - 10
1990.10.28 - 10
28.10.1990 - 10
10-28-90 USA - 8
28-10-90 on hold - 8
10/28/90 USA - 8
28/10/90 on hold - 8
19901028 - 8
90-10-28 - 8
90/10/28 - 8
23:23:23 - 8
23:23 - 5
23:2 - 4
5:14 - 4
5:2 - 3
"""
def __init__(self, *args, **kwargs):
pass
@staticmethod
def _str_parser(string):
"""
return method by the length of string
:param string: string
:return: method
"""
if not any(c.isalpha() for c in string):
_string = string[:19]
_length = len(_string)
if _length > 10:
return BaseParser.parse_datetime
elif 6 <= _length <= 10:
if ':' in _string:
return BaseParser.parse_time
else:
return BaseParser.parse_date
elif _length < 6:
return BaseParser.parse_time
else:
return BaseParser.parse_special
else:
return BaseParser.__parse_not_only_str
@staticmethod
def __parse_not_only_str(string):
baseParser_functions_to_try = [BaseParser.from_str, BaseParser.parse_diff]
raised_exception = None
for baseParser_function in baseParser_functions_to_try:
try:
return baseParser_function(string)
except Exception as e:
raised_exception = e
if raised_exception:
raise raised_exception
@staticmethod
def _datetime_parser(value):
return value
@staticmethod
def _timestamp_parser(value):
return datetime.datetime.fromtimestamp(value)
@staticmethod
def _special_parser(value):
return value
def _main_parser(func):
def wrapper(cls, *args, **kwargs):
value = args[0]
if isinstance(value, str_tuple):
method = BaseParser._str_parser(value)
elif isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
method = BaseParser._datetime_parser
elif isinstance(value, (int, float)):
method = BaseParser._timestamp_parser
else:
method = BaseParser._special_parser
if hasattr(method, '__call__'):
return method(value)
else:
raise UnexpectedTypeError(
'can not generate method for {value} type:{type}'.format(value=value, type=type(value)))
return wrapper
@classmethod
@_main_parser
def main(cls, value):
"""parse all type value"""
@staticmethod
def parse_datetime(string, formation=None):
if formation:
_stamp = datetime.datetime.strptime(string, formation)
elif len(string) >= 18:
_stamp = datetime.datetime.strptime(string, '%Y-%m-%d %H:%M:%S')
elif len(string) < 18:
if '-' in string:
_stamp = datetime.datetime.strptime(string, '%y-%m-%d %H:%M:%S')
else:
try:
_stamp = datetime.datetime.strptime(string, '%Y%m%d %H:%M:%S')
except ValueError:
_stamp = datetime.datetime.strptime(string, '%y%m%d %H:%M:%S')
else:
raise CanNotFormatError('Need %Y-%m-%d %H:%M:%S or %y-%m-%d %H:%M:%S')
return _stamp
@staticmethod
@staticmethod
def parse_time(string):
pass
@staticmethod
def parse_special(string):
pass
@staticmethod
def parse_diff(base_str):
"""
parse string to regular timedelta
:param base_str: str
:return: dict
"""
temp_dict = {'years': 0,
'months': 0,
'weeks': 0,
'days': 0,
'hours': 0,
'minutes': 0,
'seconds': 0}
_pure_str = re.findall("[a-zA-Z]+", base_str)
pure_num = [int(_) for _ in re.findall(r'\d+', base_str)]
pure_str = [filter_unit(_) for _ in _pure_str]
result_dict = dict(chain(temp_dict.items(), dict(zip(pure_str, pure_num)).items()))
if result_dict['months'] >= 12:
advance = result_dict['months'] // 12
remain = result_dict['months'] % 12
result_dict['years'] += advance
result_dict['months'] = remain
if result_dict['weeks']:
result_dict['days'] += result_dict['weeks'] * 7
return result_dict
@staticmethod
def from_str(date):
"""
Given a date in the format: Jan,21st.2015
will return a datetime of it.
"""
month = date[:3][0] + date[:3][-2:].lower()
if month not in NAMED_MONTHS:
raise CanNotFormatError('Month not recognized')
date = date.replace(',', '').replace(' ', '').replace('.', '')
try:
day_unit = [x for x in ['st', 'rd', 'nd', 'th'] if x in date][0]
day = int(re.search(r'\d+', date.split(day_unit)[0]).group())
year = int(re.search(r'\d+', date.split(day_unit)[1]).group())
numeric_month = NAMED_MONTHS[month]
return datetime.date(int(year), numeric_month, day)
except:
raise CanNotFormatError('Not well formatted. Expecting something like May,21st.2015')
|
shinux/PyTime
|
pytime/filter.py
|
BaseParser.parse_diff
|
python
|
def parse_diff(base_str):
    """
    parse string to regular timedelta
    :param base_str: str
    :return: dict
    """
    result = {'years': 0,
              'months': 0,
              'weeks': 0,
              'days': 0,
              'hours': 0,
              'minutes': 0,
              'seconds': 0}
    # Pair each unit word (normalized via filter_unit) with its number.
    units = [filter_unit(word) for word in re.findall("[a-zA-Z]+", base_str)]
    amounts = [int(num) for num in re.findall(r'\d+', base_str)]
    result.update(zip(units, amounts))
    # Carry months past a full year into the years field.
    if result['months'] >= 12:
        result['years'] += result['months'] // 12
        result['months'] %= 12
    # Fold weeks into days so callers only deal with days.
    if result['weeks']:
        result['days'] += result['weeks'] * 7
    return result
|
parse string to regular timedelta
:param base_str: str
:return: dict
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/filter.py#L231-L256
| null |
class BaseParser(object):
"""Parse string to regular datetime/date type
1990-10-28 23:23:23 - 19
90-10-28 23:23:23 - 17
1990-10-28 - 10
28-10-1990 - 10
1990/10/28 - 10
28/10/1990 - 10
1990.10.28 - 10
28.10.1990 - 10
10-28-90 USA - 8
28-10-90 on hold - 8
10/28/90 USA - 8
28/10/90 on hold - 8
19901028 - 8
90-10-28 - 8
90/10/28 - 8
23:23:23 - 8
23:23 - 5
23:2 - 4
5:14 - 4
5:2 - 3
"""
def __init__(self, *args, **kwargs):
pass
@staticmethod
def _str_parser(string):
"""
return method by the length of string
:param string: string
:return: method
"""
if not any(c.isalpha() for c in string):
_string = string[:19]
_length = len(_string)
if _length > 10:
return BaseParser.parse_datetime
elif 6 <= _length <= 10:
if ':' in _string:
return BaseParser.parse_time
else:
return BaseParser.parse_date
elif _length < 6:
return BaseParser.parse_time
else:
return BaseParser.parse_special
else:
return BaseParser.__parse_not_only_str
@staticmethod
def __parse_not_only_str(string):
baseParser_functions_to_try = [BaseParser.from_str, BaseParser.parse_diff]
raised_exception = None
for baseParser_function in baseParser_functions_to_try:
try:
return baseParser_function(string)
except Exception as e:
raised_exception = e
if raised_exception:
raise raised_exception
@staticmethod
def _datetime_parser(value):
return value
@staticmethod
def _timestamp_parser(value):
return datetime.datetime.fromtimestamp(value)
@staticmethod
def _special_parser(value):
return value
def _main_parser(func):
def wrapper(cls, *args, **kwargs):
value = args[0]
if isinstance(value, str_tuple):
method = BaseParser._str_parser(value)
elif isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
method = BaseParser._datetime_parser
elif isinstance(value, (int, float)):
method = BaseParser._timestamp_parser
else:
method = BaseParser._special_parser
if hasattr(method, '__call__'):
return method(value)
else:
raise UnexpectedTypeError(
'can not generate method for {value} type:{type}'.format(value=value, type=type(value)))
return wrapper
@classmethod
@_main_parser
def main(cls, value):
"""parse all type value"""
@staticmethod
def parse_datetime(string, formation=None):
if formation:
_stamp = datetime.datetime.strptime(string, formation)
elif len(string) >= 18:
_stamp = datetime.datetime.strptime(string, '%Y-%m-%d %H:%M:%S')
elif len(string) < 18:
if '-' in string:
_stamp = datetime.datetime.strptime(string, '%y-%m-%d %H:%M:%S')
else:
try:
_stamp = datetime.datetime.strptime(string, '%Y%m%d %H:%M:%S')
except ValueError:
_stamp = datetime.datetime.strptime(string, '%y%m%d %H:%M:%S')
else:
raise CanNotFormatError('Need %Y-%m-%d %H:%M:%S or %y-%m-%d %H:%M:%S')
return _stamp
@staticmethod
def parse_date(string, formation=None):
"""
string to date stamp
:param string: date string
:param formation: format string
:return: datetime.date
"""
if formation:
_stamp = datetime.datetime.strptime(string, formation).date()
return _stamp
_string = string.replace('.', '-').replace('/', '-')
if '-' in _string:
if len(_string.split('-')[0]) > 3 or len(_string.split('-')[2]) > 3:
try:
_stamp = datetime.datetime.strptime(_string, '%Y-%m-%d').date()
except ValueError:
try:
_stamp = datetime.datetime.strptime(_string, '%m-%d-%Y').date()
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%d-%m-%Y').date()
else:
try:
_stamp = datetime.datetime.strptime(_string, '%y-%m-%d').date()
except ValueError:
try:
_stamp = datetime.datetime.strptime(_string, '%m-%d-%y').date()
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%d-%m-%y').date()
else:
if len(_string) > 6:
try:
_stamp = datetime.datetime.strptime(_string, '%Y%m%d').date()
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%m%d%Y').date()
elif len(_string) <= 6:
try:
_stamp = datetime.datetime.strptime(_string, '%y%m%d').date()
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%m%d%y').date()
else:
raise CanNotFormatError
return _stamp
@staticmethod
def parse_time(string):
pass
@staticmethod
def parse_special(string):
pass
@staticmethod
@staticmethod
def from_str(date):
"""
Given a date in the format: Jan,21st.2015
will return a datetime of it.
"""
month = date[:3][0] + date[:3][-2:].lower()
if month not in NAMED_MONTHS:
raise CanNotFormatError('Month not recognized')
date = date.replace(',', '').replace(' ', '').replace('.', '')
try:
day_unit = [x for x in ['st', 'rd', 'nd', 'th'] if x in date][0]
day = int(re.search(r'\d+', date.split(day_unit)[0]).group())
year = int(re.search(r'\d+', date.split(day_unit)[1]).group())
numeric_month = NAMED_MONTHS[month]
return datetime.date(int(year), numeric_month, day)
except:
raise CanNotFormatError('Not well formatted. Expecting something like May,21st.2015')
|
shinux/PyTime
|
pytime/filter.py
|
BaseParser.from_str
|
python
|
def from_str(date):
    """
    Given a date in the format: Jan,21st.2015
    will return a datetime of it.
    """
    prefix = date[:3]
    # Keep the first letter as-is, lower-case the next two, then look the
    # 3-letter abbreviation up in the named-months table.
    month = prefix[0] + prefix[-2:].lower()
    if month not in NAMED_MONTHS:
        raise CanNotFormatError('Month not recognized')
    compact = date.replace(',', '').replace(' ', '').replace('.', '')
    try:
        ordinal = [s for s in ('st', 'rd', 'nd', 'th') if s in compact][0]
        head, tail = compact.split(ordinal)[0], compact.split(ordinal)[1]
        day = int(re.search(r'\d+', head).group())
        year = int(re.search(r'\d+', tail).group())
        return datetime.date(int(year), NAMED_MONTHS[month], day)
    except:
        raise CanNotFormatError('Not well formatted. Expecting something like May,21st.2015')
|
Given a date in the format: Jan,21st.2015
will return a datetime of it.
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/filter.py#L259-L276
| null |
class BaseParser(object):
"""Parse string to regular datetime/date type
1990-10-28 23:23:23 - 19
90-10-28 23:23:23 - 17
1990-10-28 - 10
28-10-1990 - 10
1990/10/28 - 10
28/10/1990 - 10
1990.10.28 - 10
28.10.1990 - 10
10-28-90 USA - 8
28-10-90 on hold - 8
10/28/90 USA - 8
28/10/90 on hold - 8
19901028 - 8
90-10-28 - 8
90/10/28 - 8
23:23:23 - 8
23:23 - 5
23:2 - 4
5:14 - 4
5:2 - 3
"""
def __init__(self, *args, **kwargs):
pass
@staticmethod
def _str_parser(string):
"""
return method by the length of string
:param string: string
:return: method
"""
if not any(c.isalpha() for c in string):
_string = string[:19]
_length = len(_string)
if _length > 10:
return BaseParser.parse_datetime
elif 6 <= _length <= 10:
if ':' in _string:
return BaseParser.parse_time
else:
return BaseParser.parse_date
elif _length < 6:
return BaseParser.parse_time
else:
return BaseParser.parse_special
else:
return BaseParser.__parse_not_only_str
@staticmethod
def __parse_not_only_str(string):
baseParser_functions_to_try = [BaseParser.from_str, BaseParser.parse_diff]
raised_exception = None
for baseParser_function in baseParser_functions_to_try:
try:
return baseParser_function(string)
except Exception as e:
raised_exception = e
if raised_exception:
raise raised_exception
@staticmethod
def _datetime_parser(value):
return value
@staticmethod
def _timestamp_parser(value):
return datetime.datetime.fromtimestamp(value)
@staticmethod
def _special_parser(value):
return value
def _main_parser(func):
def wrapper(cls, *args, **kwargs):
value = args[0]
if isinstance(value, str_tuple):
method = BaseParser._str_parser(value)
elif isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
method = BaseParser._datetime_parser
elif isinstance(value, (int, float)):
method = BaseParser._timestamp_parser
else:
method = BaseParser._special_parser
if hasattr(method, '__call__'):
return method(value)
else:
raise UnexpectedTypeError(
'can not generate method for {value} type:{type}'.format(value=value, type=type(value)))
return wrapper
@classmethod
@_main_parser
def main(cls, value):
"""parse all type value"""
@staticmethod
def parse_datetime(string, formation=None):
if formation:
_stamp = datetime.datetime.strptime(string, formation)
elif len(string) >= 18:
_stamp = datetime.datetime.strptime(string, '%Y-%m-%d %H:%M:%S')
elif len(string) < 18:
if '-' in string:
_stamp = datetime.datetime.strptime(string, '%y-%m-%d %H:%M:%S')
else:
try:
_stamp = datetime.datetime.strptime(string, '%Y%m%d %H:%M:%S')
except ValueError:
_stamp = datetime.datetime.strptime(string, '%y%m%d %H:%M:%S')
else:
raise CanNotFormatError('Need %Y-%m-%d %H:%M:%S or %y-%m-%d %H:%M:%S')
return _stamp
@staticmethod
def parse_date(string, formation=None):
"""
string to date stamp
:param string: date string
:param formation: format string
:return: datetime.date
"""
if formation:
_stamp = datetime.datetime.strptime(string, formation).date()
return _stamp
_string = string.replace('.', '-').replace('/', '-')
if '-' in _string:
if len(_string.split('-')[0]) > 3 or len(_string.split('-')[2]) > 3:
try:
_stamp = datetime.datetime.strptime(_string, '%Y-%m-%d').date()
except ValueError:
try:
_stamp = datetime.datetime.strptime(_string, '%m-%d-%Y').date()
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%d-%m-%Y').date()
else:
try:
_stamp = datetime.datetime.strptime(_string, '%y-%m-%d').date()
except ValueError:
try:
_stamp = datetime.datetime.strptime(_string, '%m-%d-%y').date()
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%d-%m-%y').date()
else:
if len(_string) > 6:
try:
_stamp = datetime.datetime.strptime(_string, '%Y%m%d').date()
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%m%d%Y').date()
elif len(_string) <= 6:
try:
_stamp = datetime.datetime.strptime(_string, '%y%m%d').date()
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%m%d%y').date()
else:
raise CanNotFormatError
return _stamp
@staticmethod
def parse_time(string):
pass
@staticmethod
def parse_special(string):
pass
@staticmethod
def parse_diff(base_str):
"""
parse string to regular timedelta
:param base_str: str
:return: dict
"""
temp_dict = {'years': 0,
'months': 0,
'weeks': 0,
'days': 0,
'hours': 0,
'minutes': 0,
'seconds': 0}
_pure_str = re.findall("[a-zA-Z]+", base_str)
pure_num = [int(_) for _ in re.findall(r'\d+', base_str)]
pure_str = [filter_unit(_) for _ in _pure_str]
result_dict = dict(chain(temp_dict.items(), dict(zip(pure_str, pure_num)).items()))
if result_dict['months'] >= 12:
advance = result_dict['months'] // 12
remain = result_dict['months'] % 12
result_dict['years'] += advance
result_dict['months'] = remain
if result_dict['weeks']:
result_dict['days'] += result_dict['weeks'] * 7
return result_dict
@staticmethod
|
shinux/PyTime
|
pytime/pytime.py
|
today
|
python
|
def today(year=None):
    """this day, last year"""
    # Falsy year (None, 0, '') keeps today's date unchanged.
    if not year:
        return _date
    return datetime.date(int(year), _date.month, _date.day)
|
this day, last year
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/pytime.py#L50-L52
| null |
#!/usr/bin/env python
# encoding: utf-8
"""
pytime
~~~~~~~~~~~~~
A easy-use module to solve the datetime needs by string.
:copyright: (c) 2015 by Sinux <nsinux@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import datetime
import calendar
from .filter import BaseParser
bp = BaseParser.main
dp = BaseParser.parse_diff
def parse(value):
return bp(value)
def count(value1, value2):
_val1, _val2 = parse(value1), parse(value2)
if type(_val1) == type(_val2):
return _val1 - _val2
else:
_val1 = _val1 if isinstance(_val1, datetime.datetime) else midnight(_val1)
_val2 = _val2 if isinstance(_val2, datetime.datetime) else midnight(_val2)
return _val1 - _val2
# max, min
_date = datetime.date.today()
_datetime = datetime.datetime.now()
_year = _date.year
_month = _date.month
_day = _date.day
_SEVEN_DAYS = datetime.timedelta(days=7)
_ONE_DAY = datetime.timedelta(days=1)
def tomorrow(date=None):
"""tomorrow is another day"""
if not date:
return _date + datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date + datetime.timedelta(days=1)
def yesterday(date=None):
"""yesterday once more"""
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date - datetime.timedelta(days=1)
########################
# function method
########################
def days_range(first=None, second=None, wipe=False):
"""
get all days between first and second
:param first: datetime, date or string
:param second: datetime, date or string
:param wipe: boolean, excludes first and last date from range when True. Default is False.
:return: list
"""
_first, _second = parse(first), parse(second)
(_start, _end) = (_second, _first) if _first > _second else (_first, _second)
days_between = (_end - _start).days
date_list = [_end - datetime.timedelta(days=x) for x in range(0, days_between + 1)]
if wipe and len(date_list) >= 2:
date_list = date_list[1:-1]
return date_list
def last_day(year=_year, month=_month):
"""
get the current month's last day
:param year: default to current year
:param month: default to current month
:return: month's last day
"""
last_day = calendar.monthrange(year, month)[1]
return datetime.date(year=year, month=month, day=last_day)
def midnight(arg=None):
"""
convert date to datetime as midnight or get current day's midnight
:param arg: string or date/datetime
:return: datetime at 00:00:00
"""
if arg:
_arg = parse(arg)
if isinstance(_arg, datetime.date):
return datetime.datetime.combine(_arg, datetime.datetime.min.time())
elif isinstance(_arg, datetime.datetime):
return datetime.datetime.combine(_arg.date(), datetime.datetime.min.time())
else:
return datetime.datetime.combine(_date, datetime.datetime.min.time())
def before(base=_datetime, diff=None):
"""
count datetime before `base` time
:param base: minuend -> str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
if not diff:
return _base
result_dict = dp(diff)
# weeks already convert to days in diff_parse function(dp)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year - _val))
elif unit == 'months':
if _base.month <= _val:
_month_diff = 12 - (_val - _base.month)
_base = _base.replace(year=_base.year - 1).replace(month=_month_diff)
else:
_base = _base.replace(month=_base.month - _val)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base - datetime.timedelta(**{unit: _val})
return _base
def after(base=_datetime, diff=None):
"""
count datetime after diff args
:param base: str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
result_dict = dp(diff)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year + _val))
elif unit == 'months':
if _base.month + _val <= 12:
_base = _base.replace(month=_base.month + _val)
else:
_month_diff = (_base.month + _val) - 12
_base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base + datetime.timedelta(**{unit: _val})
return _base
def _datetime_to_date(arg):
"""
convert datetime/str to date
:param arg:
:return:
"""
_arg = parse(arg)
if isinstance(_arg, datetime.datetime):
_arg = _arg.date()
return _arg
# Monday to Monday -> 00:00:00 to 00:00:00 month 1st - next month 1st
def this_week(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return _arg - datetime.timedelta(days=_arg.weekday()), _arg + datetime.timedelta(
days=6 - _arg.weekday()) if clean else _arg + datetime.timedelta(days=6 - _arg.weekday()) + _ONE_DAY
def last_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] - _SEVEN_DAYS, this_week_tuple[1] - _SEVEN_DAYS if clean \
else this_week_tuple[1] - _SEVEN_DAYS + _ONE_DAY
def next_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] + _SEVEN_DAYS, this_week_tuple[1] + _SEVEN_DAYS if clean \
else this_week_tuple[1] + _SEVEN_DAYS + _ONE_DAY
def this_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return datetime.date(_arg.year, _arg.month, 1), last_day(_arg.year, _arg.month) if clean \
else last_day(_arg.year, _arg.month) + _ONE_DAY
def last_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_first_day = datetime.date(_arg.year, _arg.month, 1)
last_month_last_day = this_month_first_day - _ONE_DAY
last_month_first_day = datetime.date(last_month_last_day.year, last_month_last_day.month, 1)
return last_month_first_day, last_month_last_day if clean else this_month_first_day
def next_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_last_day = last_day(_arg.year, _arg.month)
next_month_first_day = this_month_last_day + _ONE_DAY
next_month_last_day = last_day(next_month_first_day.year, next_month_first_day.month)
return next_month_first_day, next_month_last_day if clean else next_month_last_day + _ONE_DAY
######################
# festival
######################
def new_year(year=None):
return datetime.date(int(year), 1, 1) if year else datetime.date(_year, 1, 1)
def valentine(year=None):
return datetime.date(int(year), 2, 14) if year else datetime.date(_year, 2, 14)
def fool(year=None):
return datetime.date(int(year), 4, 1) if year else datetime.date(_year, 4, 1)
def christmas(year=None):
return datetime.date(int(year), 12, 25) if year else datetime.date(_year, 12, 25)
def christ_eve(year=None):
return yesterday(christmas(year))
def mother(year=None):
"""
the 2nd Sunday in May
:param year: int
:return: Mother's day
"""
may_first = datetime.date(_year, 5, 1) if not year else datetime.date(int(year), 5, 1)
weekday_seq = may_first.weekday()
return datetime.date(may_first.year, 5, (14 - weekday_seq))
def father(year=None):
"""
the 3rd Sunday in June
:param year: int
:return: Father's day
"""
june_first = datetime.date(_year, 6, 1) if not year else datetime.date(int(year), 6, 1)
weekday_seq = june_first.weekday()
return datetime.date(june_first.year, 6, (21 - weekday_seq))
def halloween(year=None):
return last_day(month=10) if not year else last_day(year, 10)
def easter(year=None):
"""
1900 - 2099 limit
:param year: int
:return: Easter day
"""
y = int(year) if year else _year
n = y - 1900
a = n % 19
q = n // 4
b = (7 * a + 1) // 19
m = (11 * a + 4 - b) % 29
w = (n + q + 31 - m) % 7
d = 25 - m - w
if d > 0:
return datetime.date(y, 4, d)
else:
return datetime.date(y, 3, (31 + d))
def thanks(year=None):
"""
4rd Thursday in Nov
:param year: int
:return: Thanksgiving Day
"""
nov_first = datetime.date(_year, 11, 1) if not year else datetime.date(int(year), 11, 1)
weekday_seq = nov_first.weekday()
if weekday_seq > 3:
current_day = 32 - weekday_seq
else:
current_day = 25 - weekday_seq
return datetime.date(nov_first.year, 11, current_day)
if __name__ == '__main__':
# _time_filter('2015-01-03')
# print(calendar.monthrange(2015, 10))
print(bp('2015-01-03'))
|
shinux/PyTime
|
pytime/pytime.py
|
tomorrow
|
python
|
def tomorrow(date=None):
    """tomorrow is another day"""
    # Default to the module-level "today"; otherwise parse the argument.
    base = _date if not date else parse(date)
    return base + datetime.timedelta(days=1)
|
tomorrow is another day
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/pytime.py#L55-L61
|
[
"def parse(value):\n return bp(value)\n"
] |
#!/usr/bin/env python
# encoding: utf-8
"""
pytime
~~~~~~~~~~~~~
A easy-use module to solve the datetime needs by string.
:copyright: (c) 2015 by Sinux <nsinux@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import datetime
import calendar
from .filter import BaseParser
bp = BaseParser.main
dp = BaseParser.parse_diff
def parse(value):
return bp(value)
def count(value1, value2):
_val1, _val2 = parse(value1), parse(value2)
if type(_val1) == type(_val2):
return _val1 - _val2
else:
_val1 = _val1 if isinstance(_val1, datetime.datetime) else midnight(_val1)
_val2 = _val2 if isinstance(_val2, datetime.datetime) else midnight(_val2)
return _val1 - _val2
# max, min
_date = datetime.date.today()
_datetime = datetime.datetime.now()
_year = _date.year
_month = _date.month
_day = _date.day
_SEVEN_DAYS = datetime.timedelta(days=7)
_ONE_DAY = datetime.timedelta(days=1)
def today(year=None):
"""this day, last year"""
return datetime.date(int(year), _date.month, _date.day) if year else _date
def yesterday(date=None):
"""yesterday once more"""
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date - datetime.timedelta(days=1)
########################
# function method
########################
def days_range(first=None, second=None, wipe=False):
"""
get all days between first and second
:param first: datetime, date or string
:param second: datetime, date or string
:param wipe: boolean, excludes first and last date from range when True. Default is False.
:return: list
"""
_first, _second = parse(first), parse(second)
(_start, _end) = (_second, _first) if _first > _second else (_first, _second)
days_between = (_end - _start).days
date_list = [_end - datetime.timedelta(days=x) for x in range(0, days_between + 1)]
if wipe and len(date_list) >= 2:
date_list = date_list[1:-1]
return date_list
def last_day(year=_year, month=_month):
"""
get the current month's last day
:param year: default to current year
:param month: default to current month
:return: month's last day
"""
last_day = calendar.monthrange(year, month)[1]
return datetime.date(year=year, month=month, day=last_day)
def midnight(arg=None):
"""
convert date to datetime as midnight or get current day's midnight
:param arg: string or date/datetime
:return: datetime at 00:00:00
"""
if arg:
_arg = parse(arg)
if isinstance(_arg, datetime.date):
return datetime.datetime.combine(_arg, datetime.datetime.min.time())
elif isinstance(_arg, datetime.datetime):
return datetime.datetime.combine(_arg.date(), datetime.datetime.min.time())
else:
return datetime.datetime.combine(_date, datetime.datetime.min.time())
def before(base=_datetime, diff=None):
"""
count datetime before `base` time
:param base: minuend -> str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
if not diff:
return _base
result_dict = dp(diff)
# weeks already convert to days in diff_parse function(dp)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year - _val))
elif unit == 'months':
if _base.month <= _val:
_month_diff = 12 - (_val - _base.month)
_base = _base.replace(year=_base.year - 1).replace(month=_month_diff)
else:
_base = _base.replace(month=_base.month - _val)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base - datetime.timedelta(**{unit: _val})
return _base
def after(base=_datetime, diff=None):
"""
count datetime after diff args
:param base: str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
result_dict = dp(diff)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year + _val))
elif unit == 'months':
if _base.month + _val <= 12:
_base = _base.replace(month=_base.month + _val)
else:
_month_diff = (_base.month + _val) - 12
_base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base + datetime.timedelta(**{unit: _val})
return _base
def _datetime_to_date(arg):
"""
convert datetime/str to date
:param arg:
:return:
"""
_arg = parse(arg)
if isinstance(_arg, datetime.datetime):
_arg = _arg.date()
return _arg
# Monday to Monday -> 00:00:00 to 00:00:00 month 1st - next month 1st
def this_week(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return _arg - datetime.timedelta(days=_arg.weekday()), _arg + datetime.timedelta(
days=6 - _arg.weekday()) if clean else _arg + datetime.timedelta(days=6 - _arg.weekday()) + _ONE_DAY
def last_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] - _SEVEN_DAYS, this_week_tuple[1] - _SEVEN_DAYS if clean \
else this_week_tuple[1] - _SEVEN_DAYS + _ONE_DAY
def next_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] + _SEVEN_DAYS, this_week_tuple[1] + _SEVEN_DAYS if clean \
else this_week_tuple[1] + _SEVEN_DAYS + _ONE_DAY
def this_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return datetime.date(_arg.year, _arg.month, 1), last_day(_arg.year, _arg.month) if clean \
else last_day(_arg.year, _arg.month) + _ONE_DAY
def last_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_first_day = datetime.date(_arg.year, _arg.month, 1)
last_month_last_day = this_month_first_day - _ONE_DAY
last_month_first_day = datetime.date(last_month_last_day.year, last_month_last_day.month, 1)
return last_month_first_day, last_month_last_day if clean else this_month_first_day
def next_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_last_day = last_day(_arg.year, _arg.month)
next_month_first_day = this_month_last_day + _ONE_DAY
next_month_last_day = last_day(next_month_first_day.year, next_month_first_day.month)
return next_month_first_day, next_month_last_day if clean else next_month_last_day + _ONE_DAY
######################
# festival
######################
def new_year(year=None):
return datetime.date(int(year), 1, 1) if year else datetime.date(_year, 1, 1)
def valentine(year=None):
return datetime.date(int(year), 2, 14) if year else datetime.date(_year, 2, 14)
def fool(year=None):
return datetime.date(int(year), 4, 1) if year else datetime.date(_year, 4, 1)
def christmas(year=None):
return datetime.date(int(year), 12, 25) if year else datetime.date(_year, 12, 25)
def christ_eve(year=None):
return yesterday(christmas(year))
def mother(year=None):
"""
the 2nd Sunday in May
:param year: int
:return: Mother's day
"""
may_first = datetime.date(_year, 5, 1) if not year else datetime.date(int(year), 5, 1)
weekday_seq = may_first.weekday()
return datetime.date(may_first.year, 5, (14 - weekday_seq))
def father(year=None):
"""
the 3rd Sunday in June
:param year: int
:return: Father's day
"""
june_first = datetime.date(_year, 6, 1) if not year else datetime.date(int(year), 6, 1)
weekday_seq = june_first.weekday()
return datetime.date(june_first.year, 6, (21 - weekday_seq))
def halloween(year=None):
return last_day(month=10) if not year else last_day(year, 10)
def easter(year=None):
"""
1900 - 2099 limit
:param year: int
:return: Easter day
"""
y = int(year) if year else _year
n = y - 1900
a = n % 19
q = n // 4
b = (7 * a + 1) // 19
m = (11 * a + 4 - b) % 29
w = (n + q + 31 - m) % 7
d = 25 - m - w
if d > 0:
return datetime.date(y, 4, d)
else:
return datetime.date(y, 3, (31 + d))
def thanks(year=None):
"""
4rd Thursday in Nov
:param year: int
:return: Thanksgiving Day
"""
nov_first = datetime.date(_year, 11, 1) if not year else datetime.date(int(year), 11, 1)
weekday_seq = nov_first.weekday()
if weekday_seq > 3:
current_day = 32 - weekday_seq
else:
current_day = 25 - weekday_seq
return datetime.date(nov_first.year, 11, current_day)
if __name__ == '__main__':
# _time_filter('2015-01-03')
# print(calendar.monthrange(2015, 10))
print(bp('2015-01-03'))
|
shinux/PyTime
|
pytime/pytime.py
|
yesterday
|
python
|
def yesterday(date=None):
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date - datetime.timedelta(days=1)
|
yesterday once more
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/pytime.py#L64-L70
|
[
"def parse(value):\n return bp(value)\n"
] |
#!/usr/bin/env python
# encoding: utf-8
"""
pytime
~~~~~~~~~~~~~
A easy-use module to solve the datetime needs by string.
:copyright: (c) 2015 by Sinux <nsinux@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import datetime
import calendar
from .filter import BaseParser
bp = BaseParser.main
dp = BaseParser.parse_diff
def parse(value):
return bp(value)
def count(value1, value2):
_val1, _val2 = parse(value1), parse(value2)
if type(_val1) == type(_val2):
return _val1 - _val2
else:
_val1 = _val1 if isinstance(_val1, datetime.datetime) else midnight(_val1)
_val2 = _val2 if isinstance(_val2, datetime.datetime) else midnight(_val2)
return _val1 - _val2
# max, min
_date = datetime.date.today()
_datetime = datetime.datetime.now()
_year = _date.year
_month = _date.month
_day = _date.day
_SEVEN_DAYS = datetime.timedelta(days=7)
_ONE_DAY = datetime.timedelta(days=1)
def today(year=None):
"""this day, last year"""
return datetime.date(int(year), _date.month, _date.day) if year else _date
def tomorrow(date=None):
"""tomorrow is another day"""
if not date:
return _date + datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date + datetime.timedelta(days=1)
########################
# function method
########################
def days_range(first=None, second=None, wipe=False):
"""
get all days between first and second
:param first: datetime, date or string
:param second: datetime, date or string
:param wipe: boolean, excludes first and last date from range when True. Default is False.
:return: list
"""
_first, _second = parse(first), parse(second)
(_start, _end) = (_second, _first) if _first > _second else (_first, _second)
days_between = (_end - _start).days
date_list = [_end - datetime.timedelta(days=x) for x in range(0, days_between + 1)]
if wipe and len(date_list) >= 2:
date_list = date_list[1:-1]
return date_list
def last_day(year=_year, month=_month):
"""
get the current month's last day
:param year: default to current year
:param month: default to current month
:return: month's last day
"""
last_day = calendar.monthrange(year, month)[1]
return datetime.date(year=year, month=month, day=last_day)
def midnight(arg=None):
"""
convert date to datetime as midnight or get current day's midnight
:param arg: string or date/datetime
:return: datetime at 00:00:00
"""
if arg:
_arg = parse(arg)
if isinstance(_arg, datetime.date):
return datetime.datetime.combine(_arg, datetime.datetime.min.time())
elif isinstance(_arg, datetime.datetime):
return datetime.datetime.combine(_arg.date(), datetime.datetime.min.time())
else:
return datetime.datetime.combine(_date, datetime.datetime.min.time())
def before(base=_datetime, diff=None):
"""
count datetime before `base` time
:param base: minuend -> str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
if not diff:
return _base
result_dict = dp(diff)
# weeks already convert to days in diff_parse function(dp)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year - _val))
elif unit == 'months':
if _base.month <= _val:
_month_diff = 12 - (_val - _base.month)
_base = _base.replace(year=_base.year - 1).replace(month=_month_diff)
else:
_base = _base.replace(month=_base.month - _val)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base - datetime.timedelta(**{unit: _val})
return _base
def after(base=_datetime, diff=None):
"""
count datetime after diff args
:param base: str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
result_dict = dp(diff)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year + _val))
elif unit == 'months':
if _base.month + _val <= 12:
_base = _base.replace(month=_base.month + _val)
else:
_month_diff = (_base.month + _val) - 12
_base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base + datetime.timedelta(**{unit: _val})
return _base
def _datetime_to_date(arg):
"""
convert datetime/str to date
:param arg:
:return:
"""
_arg = parse(arg)
if isinstance(_arg, datetime.datetime):
_arg = _arg.date()
return _arg
# Monday to Monday -> 00:00:00 to 00:00:00 month 1st - next month 1st
def this_week(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return _arg - datetime.timedelta(days=_arg.weekday()), _arg + datetime.timedelta(
days=6 - _arg.weekday()) if clean else _arg + datetime.timedelta(days=6 - _arg.weekday()) + _ONE_DAY
def last_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] - _SEVEN_DAYS, this_week_tuple[1] - _SEVEN_DAYS if clean \
else this_week_tuple[1] - _SEVEN_DAYS + _ONE_DAY
def next_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] + _SEVEN_DAYS, this_week_tuple[1] + _SEVEN_DAYS if clean \
else this_week_tuple[1] + _SEVEN_DAYS + _ONE_DAY
def this_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return datetime.date(_arg.year, _arg.month, 1), last_day(_arg.year, _arg.month) if clean \
else last_day(_arg.year, _arg.month) + _ONE_DAY
def last_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_first_day = datetime.date(_arg.year, _arg.month, 1)
last_month_last_day = this_month_first_day - _ONE_DAY
last_month_first_day = datetime.date(last_month_last_day.year, last_month_last_day.month, 1)
return last_month_first_day, last_month_last_day if clean else this_month_first_day
def next_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_last_day = last_day(_arg.year, _arg.month)
next_month_first_day = this_month_last_day + _ONE_DAY
next_month_last_day = last_day(next_month_first_day.year, next_month_first_day.month)
return next_month_first_day, next_month_last_day if clean else next_month_last_day + _ONE_DAY
######################
# festival
######################
def new_year(year=None):
return datetime.date(int(year), 1, 1) if year else datetime.date(_year, 1, 1)
def valentine(year=None):
return datetime.date(int(year), 2, 14) if year else datetime.date(_year, 2, 14)
def fool(year=None):
return datetime.date(int(year), 4, 1) if year else datetime.date(_year, 4, 1)
def christmas(year=None):
return datetime.date(int(year), 12, 25) if year else datetime.date(_year, 12, 25)
def christ_eve(year=None):
return yesterday(christmas(year))
def mother(year=None):
"""
the 2nd Sunday in May
:param year: int
:return: Mother's day
"""
may_first = datetime.date(_year, 5, 1) if not year else datetime.date(int(year), 5, 1)
weekday_seq = may_first.weekday()
return datetime.date(may_first.year, 5, (14 - weekday_seq))
def father(year=None):
"""
the 3rd Sunday in June
:param year: int
:return: Father's day
"""
june_first = datetime.date(_year, 6, 1) if not year else datetime.date(int(year), 6, 1)
weekday_seq = june_first.weekday()
return datetime.date(june_first.year, 6, (21 - weekday_seq))
def halloween(year=None):
return last_day(month=10) if not year else last_day(year, 10)
def easter(year=None):
"""
1900 - 2099 limit
:param year: int
:return: Easter day
"""
y = int(year) if year else _year
n = y - 1900
a = n % 19
q = n // 4
b = (7 * a + 1) // 19
m = (11 * a + 4 - b) % 29
w = (n + q + 31 - m) % 7
d = 25 - m - w
if d > 0:
return datetime.date(y, 4, d)
else:
return datetime.date(y, 3, (31 + d))
def thanks(year=None):
"""
4rd Thursday in Nov
:param year: int
:return: Thanksgiving Day
"""
nov_first = datetime.date(_year, 11, 1) if not year else datetime.date(int(year), 11, 1)
weekday_seq = nov_first.weekday()
if weekday_seq > 3:
current_day = 32 - weekday_seq
else:
current_day = 25 - weekday_seq
return datetime.date(nov_first.year, 11, current_day)
if __name__ == '__main__':
# _time_filter('2015-01-03')
# print(calendar.monthrange(2015, 10))
print(bp('2015-01-03'))
|
shinux/PyTime
|
pytime/pytime.py
|
days_range
|
python
|
def days_range(first=None, second=None, wipe=False):
_first, _second = parse(first), parse(second)
(_start, _end) = (_second, _first) if _first > _second else (_first, _second)
days_between = (_end - _start).days
date_list = [_end - datetime.timedelta(days=x) for x in range(0, days_between + 1)]
if wipe and len(date_list) >= 2:
date_list = date_list[1:-1]
return date_list
|
get all days between first and second
:param first: datetime, date or string
:param second: datetime, date or string
:param wipe: boolean, excludes first and last date from range when True. Default is False.
:return: list
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/pytime.py#L78-L93
|
[
"def parse(value):\n return bp(value)\n"
] |
#!/usr/bin/env python
# encoding: utf-8
"""
pytime
~~~~~~~~~~~~~
A easy-use module to solve the datetime needs by string.
:copyright: (c) 2015 by Sinux <nsinux@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import datetime
import calendar
from .filter import BaseParser
bp = BaseParser.main
dp = BaseParser.parse_diff
def parse(value):
return bp(value)
def count(value1, value2):
_val1, _val2 = parse(value1), parse(value2)
if type(_val1) == type(_val2):
return _val1 - _val2
else:
_val1 = _val1 if isinstance(_val1, datetime.datetime) else midnight(_val1)
_val2 = _val2 if isinstance(_val2, datetime.datetime) else midnight(_val2)
return _val1 - _val2
# max, min
_date = datetime.date.today()
_datetime = datetime.datetime.now()
_year = _date.year
_month = _date.month
_day = _date.day
_SEVEN_DAYS = datetime.timedelta(days=7)
_ONE_DAY = datetime.timedelta(days=1)
def today(year=None):
"""this day, last year"""
return datetime.date(int(year), _date.month, _date.day) if year else _date
def tomorrow(date=None):
"""tomorrow is another day"""
if not date:
return _date + datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date + datetime.timedelta(days=1)
def yesterday(date=None):
"""yesterday once more"""
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date - datetime.timedelta(days=1)
########################
# function method
########################
def last_day(year=_year, month=_month):
"""
get the current month's last day
:param year: default to current year
:param month: default to current month
:return: month's last day
"""
last_day = calendar.monthrange(year, month)[1]
return datetime.date(year=year, month=month, day=last_day)
def midnight(arg=None):
"""
convert date to datetime as midnight or get current day's midnight
:param arg: string or date/datetime
:return: datetime at 00:00:00
"""
if arg:
_arg = parse(arg)
if isinstance(_arg, datetime.date):
return datetime.datetime.combine(_arg, datetime.datetime.min.time())
elif isinstance(_arg, datetime.datetime):
return datetime.datetime.combine(_arg.date(), datetime.datetime.min.time())
else:
return datetime.datetime.combine(_date, datetime.datetime.min.time())
def before(base=_datetime, diff=None):
"""
count datetime before `base` time
:param base: minuend -> str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
if not diff:
return _base
result_dict = dp(diff)
# weeks already convert to days in diff_parse function(dp)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year - _val))
elif unit == 'months':
if _base.month <= _val:
_month_diff = 12 - (_val - _base.month)
_base = _base.replace(year=_base.year - 1).replace(month=_month_diff)
else:
_base = _base.replace(month=_base.month - _val)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base - datetime.timedelta(**{unit: _val})
return _base
def after(base=_datetime, diff=None):
"""
count datetime after diff args
:param base: str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
result_dict = dp(diff)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year + _val))
elif unit == 'months':
if _base.month + _val <= 12:
_base = _base.replace(month=_base.month + _val)
else:
_month_diff = (_base.month + _val) - 12
_base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base + datetime.timedelta(**{unit: _val})
return _base
def _datetime_to_date(arg):
"""
convert datetime/str to date
:param arg:
:return:
"""
_arg = parse(arg)
if isinstance(_arg, datetime.datetime):
_arg = _arg.date()
return _arg
# Monday to Monday -> 00:00:00 to 00:00:00 month 1st - next month 1st
def this_week(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return _arg - datetime.timedelta(days=_arg.weekday()), _arg + datetime.timedelta(
days=6 - _arg.weekday()) if clean else _arg + datetime.timedelta(days=6 - _arg.weekday()) + _ONE_DAY
def last_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] - _SEVEN_DAYS, this_week_tuple[1] - _SEVEN_DAYS if clean \
else this_week_tuple[1] - _SEVEN_DAYS + _ONE_DAY
def next_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] + _SEVEN_DAYS, this_week_tuple[1] + _SEVEN_DAYS if clean \
else this_week_tuple[1] + _SEVEN_DAYS + _ONE_DAY
def this_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return datetime.date(_arg.year, _arg.month, 1), last_day(_arg.year, _arg.month) if clean \
else last_day(_arg.year, _arg.month) + _ONE_DAY
def last_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_first_day = datetime.date(_arg.year, _arg.month, 1)
last_month_last_day = this_month_first_day - _ONE_DAY
last_month_first_day = datetime.date(last_month_last_day.year, last_month_last_day.month, 1)
return last_month_first_day, last_month_last_day if clean else this_month_first_day
def next_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_last_day = last_day(_arg.year, _arg.month)
next_month_first_day = this_month_last_day + _ONE_DAY
next_month_last_day = last_day(next_month_first_day.year, next_month_first_day.month)
return next_month_first_day, next_month_last_day if clean else next_month_last_day + _ONE_DAY
######################
# festival
######################
def new_year(year=None):
return datetime.date(int(year), 1, 1) if year else datetime.date(_year, 1, 1)
def valentine(year=None):
return datetime.date(int(year), 2, 14) if year else datetime.date(_year, 2, 14)
def fool(year=None):
return datetime.date(int(year), 4, 1) if year else datetime.date(_year, 4, 1)
def christmas(year=None):
return datetime.date(int(year), 12, 25) if year else datetime.date(_year, 12, 25)
def christ_eve(year=None):
return yesterday(christmas(year))
def mother(year=None):
"""
the 2nd Sunday in May
:param year: int
:return: Mother's day
"""
may_first = datetime.date(_year, 5, 1) if not year else datetime.date(int(year), 5, 1)
weekday_seq = may_first.weekday()
return datetime.date(may_first.year, 5, (14 - weekday_seq))
def father(year=None):
"""
the 3rd Sunday in June
:param year: int
:return: Father's day
"""
june_first = datetime.date(_year, 6, 1) if not year else datetime.date(int(year), 6, 1)
weekday_seq = june_first.weekday()
return datetime.date(june_first.year, 6, (21 - weekday_seq))
def halloween(year=None):
return last_day(month=10) if not year else last_day(year, 10)
def easter(year=None):
"""
1900 - 2099 limit
:param year: int
:return: Easter day
"""
y = int(year) if year else _year
n = y - 1900
a = n % 19
q = n // 4
b = (7 * a + 1) // 19
m = (11 * a + 4 - b) % 29
w = (n + q + 31 - m) % 7
d = 25 - m - w
if d > 0:
return datetime.date(y, 4, d)
else:
return datetime.date(y, 3, (31 + d))
def thanks(year=None):
"""
4rd Thursday in Nov
:param year: int
:return: Thanksgiving Day
"""
nov_first = datetime.date(_year, 11, 1) if not year else datetime.date(int(year), 11, 1)
weekday_seq = nov_first.weekday()
if weekday_seq > 3:
current_day = 32 - weekday_seq
else:
current_day = 25 - weekday_seq
return datetime.date(nov_first.year, 11, current_day)
if __name__ == '__main__':
# _time_filter('2015-01-03')
# print(calendar.monthrange(2015, 10))
print(bp('2015-01-03'))
|
shinux/PyTime
|
pytime/pytime.py
|
last_day
|
python
|
def last_day(year=_year, month=_month):
last_day = calendar.monthrange(year, month)[1]
return datetime.date(year=year, month=month, day=last_day)
|
get the current month's last day
:param year: default to current year
:param month: default to current month
:return: month's last day
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/pytime.py#L96-L104
| null |
#!/usr/bin/env python
# encoding: utf-8
"""
pytime
~~~~~~~~~~~~~
A easy-use module to solve the datetime needs by string.
:copyright: (c) 2015 by Sinux <nsinux@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import datetime
import calendar
from .filter import BaseParser
bp = BaseParser.main
dp = BaseParser.parse_diff
def parse(value):
return bp(value)
def count(value1, value2):
_val1, _val2 = parse(value1), parse(value2)
if type(_val1) == type(_val2):
return _val1 - _val2
else:
_val1 = _val1 if isinstance(_val1, datetime.datetime) else midnight(_val1)
_val2 = _val2 if isinstance(_val2, datetime.datetime) else midnight(_val2)
return _val1 - _val2
# max, min
_date = datetime.date.today()
_datetime = datetime.datetime.now()
_year = _date.year
_month = _date.month
_day = _date.day
_SEVEN_DAYS = datetime.timedelta(days=7)
_ONE_DAY = datetime.timedelta(days=1)
def today(year=None):
"""this day, last year"""
return datetime.date(int(year), _date.month, _date.day) if year else _date
def tomorrow(date=None):
"""tomorrow is another day"""
if not date:
return _date + datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date + datetime.timedelta(days=1)
def yesterday(date=None):
"""yesterday once more"""
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date - datetime.timedelta(days=1)
########################
# function method
########################
def days_range(first=None, second=None, wipe=False):
"""
get all days between first and second
:param first: datetime, date or string
:param second: datetime, date or string
:param wipe: boolean, excludes first and last date from range when True. Default is False.
:return: list
"""
_first, _second = parse(first), parse(second)
(_start, _end) = (_second, _first) if _first > _second else (_first, _second)
days_between = (_end - _start).days
date_list = [_end - datetime.timedelta(days=x) for x in range(0, days_between + 1)]
if wipe and len(date_list) >= 2:
date_list = date_list[1:-1]
return date_list
def midnight(arg=None):
"""
convert date to datetime as midnight or get current day's midnight
:param arg: string or date/datetime
:return: datetime at 00:00:00
"""
if arg:
_arg = parse(arg)
if isinstance(_arg, datetime.date):
return datetime.datetime.combine(_arg, datetime.datetime.min.time())
elif isinstance(_arg, datetime.datetime):
return datetime.datetime.combine(_arg.date(), datetime.datetime.min.time())
else:
return datetime.datetime.combine(_date, datetime.datetime.min.time())
def before(base=_datetime, diff=None):
"""
count datetime before `base` time
:param base: minuend -> str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
if not diff:
return _base
result_dict = dp(diff)
# weeks already convert to days in diff_parse function(dp)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year - _val))
elif unit == 'months':
if _base.month <= _val:
_month_diff = 12 - (_val - _base.month)
_base = _base.replace(year=_base.year - 1).replace(month=_month_diff)
else:
_base = _base.replace(month=_base.month - _val)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base - datetime.timedelta(**{unit: _val})
return _base
def after(base=_datetime, diff=None):
"""
count datetime after diff args
:param base: str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
result_dict = dp(diff)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year + _val))
elif unit == 'months':
if _base.month + _val <= 12:
_base = _base.replace(month=_base.month + _val)
else:
_month_diff = (_base.month + _val) - 12
_base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base + datetime.timedelta(**{unit: _val})
return _base
def _datetime_to_date(arg):
"""
convert datetime/str to date
:param arg:
:return:
"""
_arg = parse(arg)
if isinstance(_arg, datetime.datetime):
_arg = _arg.date()
return _arg
# Monday to Monday -> 00:00:00 to 00:00:00 month 1st - next month 1st
def this_week(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return _arg - datetime.timedelta(days=_arg.weekday()), _arg + datetime.timedelta(
days=6 - _arg.weekday()) if clean else _arg + datetime.timedelta(days=6 - _arg.weekday()) + _ONE_DAY
def last_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] - _SEVEN_DAYS, this_week_tuple[1] - _SEVEN_DAYS if clean \
else this_week_tuple[1] - _SEVEN_DAYS + _ONE_DAY
def next_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] + _SEVEN_DAYS, this_week_tuple[1] + _SEVEN_DAYS if clean \
else this_week_tuple[1] + _SEVEN_DAYS + _ONE_DAY
def this_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return datetime.date(_arg.year, _arg.month, 1), last_day(_arg.year, _arg.month) if clean \
else last_day(_arg.year, _arg.month) + _ONE_DAY
def last_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_first_day = datetime.date(_arg.year, _arg.month, 1)
last_month_last_day = this_month_first_day - _ONE_DAY
last_month_first_day = datetime.date(last_month_last_day.year, last_month_last_day.month, 1)
return last_month_first_day, last_month_last_day if clean else this_month_first_day
def next_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_last_day = last_day(_arg.year, _arg.month)
next_month_first_day = this_month_last_day + _ONE_DAY
next_month_last_day = last_day(next_month_first_day.year, next_month_first_day.month)
return next_month_first_day, next_month_last_day if clean else next_month_last_day + _ONE_DAY
######################
# festival
######################
def new_year(year=None):
return datetime.date(int(year), 1, 1) if year else datetime.date(_year, 1, 1)
def valentine(year=None):
return datetime.date(int(year), 2, 14) if year else datetime.date(_year, 2, 14)
def fool(year=None):
return datetime.date(int(year), 4, 1) if year else datetime.date(_year, 4, 1)
def christmas(year=None):
return datetime.date(int(year), 12, 25) if year else datetime.date(_year, 12, 25)
def christ_eve(year=None):
return yesterday(christmas(year))
def mother(year=None):
"""
the 2nd Sunday in May
:param year: int
:return: Mother's day
"""
may_first = datetime.date(_year, 5, 1) if not year else datetime.date(int(year), 5, 1)
weekday_seq = may_first.weekday()
return datetime.date(may_first.year, 5, (14 - weekday_seq))
def father(year=None):
"""
the 3rd Sunday in June
:param year: int
:return: Father's day
"""
june_first = datetime.date(_year, 6, 1) if not year else datetime.date(int(year), 6, 1)
weekday_seq = june_first.weekday()
return datetime.date(june_first.year, 6, (21 - weekday_seq))
def halloween(year=None):
    """Return Halloween (Oct 31), i.e. the last day of October.

    :param year: int or numeric string; defaults to the current year
    :return: datetime.date for Oct 31 of the requested year
    """
    if not year:
        return last_day(month=10)
    # Coerce like the sibling festival helpers (new_year, christmas, ...),
    # which all accept numeric strings via int(year); previously a string
    # year was forwarded raw and blew up inside last_day().
    return last_day(int(year), 10)
def easter(year=None):
    """Return Easter Sunday using a Gregorian approximation.

    Valid for years 1900 - 2099 only.
    :param year: int; defaults to the current year
    :return: datetime.date of Easter Sunday
    """
    target = int(year) if year else _year
    since_1900 = target - 1900
    golden = since_1900 % 19
    leap_quarters = since_1900 // 4
    correction = (7 * golden + 1) // 19
    epact = (11 * golden + 4 - correction) % 29
    weekday_shift = (since_1900 + leap_quarters + 31 - epact) % 7
    april_day = 25 - epact - weekday_shift
    # Non-positive values mean Easter falls in late March instead of April.
    if april_day > 0:
        return datetime.date(target, 4, april_day)
    return datetime.date(target, 3, 31 + april_day)
def thanks(year=None):
    """Return Thanksgiving Day: the 4th Thursday in November.

    :param year: int (or numeric string); defaults to the current year
    :return: datetime.date of Thanksgiving Day
    """
    nov_first = datetime.date(int(year), 11, 1) if year else datetime.date(_year, 11, 1)
    shift = nov_first.weekday()
    # Thursday is weekday 3; when November opens on Fri/Sat/Sun (shift > 3)
    # the first Thursday slips into the following week.
    day = 32 - shift if shift > 3 else 25 - shift
    return datetime.date(nov_first.year, 11, day)
if __name__ == '__main__':
# _time_filter('2015-01-03')
# print(calendar.monthrange(2015, 10))
print(bp('2015-01-03'))
|
shinux/PyTime
|
pytime/pytime.py
|
midnight
|
python
|
def midnight(arg=None):
if arg:
_arg = parse(arg)
if isinstance(_arg, datetime.date):
return datetime.datetime.combine(_arg, datetime.datetime.min.time())
elif isinstance(_arg, datetime.datetime):
return datetime.datetime.combine(_arg.date(), datetime.datetime.min.time())
else:
return datetime.datetime.combine(_date, datetime.datetime.min.time())
|
convert date to datetime as midnight or get current day's midnight
:param arg: string or date/datetime
:return: datetime at 00:00:00
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/pytime.py#L107-L120
|
[
"def parse(value):\n return bp(value)\n"
] |
#!/usr/bin/env python
# encoding: utf-8
"""
pytime
~~~~~~~~~~~~~
A easy-use module to solve the datetime needs by string.
:copyright: (c) 2015 by Sinux <nsinux@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import datetime
import calendar
from .filter import BaseParser
bp = BaseParser.main
dp = BaseParser.parse_diff
def parse(value):
    """Delegate to ``BaseParser.main`` to turn *value* into a date/datetime."""
    return bp(value)
def count(value1, value2):
    """Return the timedelta ``value1 - value2`` after parsing both values.

    Mixed date/datetime operands cannot be subtracted directly, so both
    are normalised to midnight datetimes first in that case.
    """
    lhs, rhs = parse(value1), parse(value2)
    if type(lhs) != type(rhs):
        lhs = lhs if isinstance(lhs, datetime.datetime) else midnight(lhs)
        rhs = rhs if isinstance(rhs, datetime.datetime) else midnight(rhs)
    return lhs - rhs
# max, min
_date = datetime.date.today()
_datetime = datetime.datetime.now()
_year = _date.year
_month = _date.month
_day = _date.day
_SEVEN_DAYS = datetime.timedelta(days=7)
_ONE_DAY = datetime.timedelta(days=1)
def today(year=None):
    """Return today's date, optionally with the year replaced by *year*."""
    if year:
        return datetime.date(int(year), _date.month, _date.day)
    return _date
def tomorrow(date=None):
    """Return the day after *date* (default: the day after today)."""
    base = parse(date) if date else _date
    return base + datetime.timedelta(days=1)
def yesterday(date=None):
    """Return the day before *date* (default: the day before today)."""
    base = parse(date) if date else _date
    return base - datetime.timedelta(days=1)
########################
# function method
########################
def days_range(first=None, second=None, wipe=False):
    """Return every date between *first* and *second*, newest first.

    :param first: datetime, date or string
    :param second: datetime, date or string
    :param wipe: when True, drop both endpoint dates from the result
    :return: list of dates in descending order (endpoints included unless wiped)
    """
    lhs, rhs = parse(first), parse(second)
    start, end = (rhs, lhs) if lhs > rhs else (lhs, rhs)
    span = (end - start).days
    dates = [end - datetime.timedelta(days=offset) for offset in range(span + 1)]
    if wipe and len(dates) >= 2:
        dates = dates[1:-1]
    return dates
def last_day(year=_year, month=_month):
    """Return the last date of *month* in *year* (defaults: current month).

    :param year: default to current year
    :param month: default to current month
    :return: datetime.date for the month's final day
    """
    _, final_dom = calendar.monthrange(year, month)
    return datetime.date(year=year, month=month, day=final_dom)
def before(base=_datetime, diff=None):
    """
    count datetime before `base` time
    :param base: minuend -> str/datetime/date
    :param diff: str
    :return: datetime
    """
    _base = parse(base)
    # NOTE(review): datetime.datetime is a subclass of datetime.date, so this
    # branch also snaps datetime inputs to midnight, discarding any
    # time-of-day -- confirm that is intentional.
    if isinstance(_base, datetime.date):
        _base = midnight(_base)
    if not diff:
        # No offset requested: return the (normalised) base unchanged.
        return _base
    result_dict = dp(diff)
    # weeks already convert to days in diff_parse function(dp)
    for unit in result_dict:
        _val = result_dict[unit]
        if not _val:
            continue
        if unit == 'years':
            _base = _base.replace(year=(_base.year - _val))
        elif unit == 'months':
            # Borrow from the year when subtraction would underflow the
            # 1..12 month range; dp() keeps months < 12 by folding excess
            # months into years.
            # NOTE(review): replace(month=...) can raise ValueError when the
            # current day does not exist in the target month (e.g. Mar 31).
            if _base.month <= _val:
                _month_diff = 12 - (_val - _base.month)
                _base = _base.replace(year=_base.year - 1).replace(month=_month_diff)
            else:
                _base = _base.replace(month=_base.month - _val)
        elif unit in ['days', 'hours', 'minutes', 'seconds']:
            # Sub-month units map directly onto timedelta arithmetic.
            _base = _base - datetime.timedelta(**{unit: _val})
    return _base
def after(base=_datetime, diff=None):
    """
    count datetime after diff args
    :param base: str/datetime/date
    :param diff: str
    :return: datetime
    """
    _base = parse(base)
    # NOTE(review): datetime.datetime subclasses datetime.date, so this also
    # snaps datetime inputs to midnight -- confirm that is intentional.
    if isinstance(_base, datetime.date):
        _base = midnight(_base)
    result_dict = dp(diff)
    for unit in result_dict:
        _val = result_dict[unit]
        if not _val:
            continue
        if unit == 'years':
            _base = _base.replace(year=(_base.year + _val))
        elif unit == 'months':
            # Carry into the next year when addition overflows month 12;
            # dp() keeps months < 12 by folding excess months into years.
            if _base.month + _val <= 12:
                _base = _base.replace(month=_base.month + _val)
            else:
                _month_diff = (_base.month + _val) - 12
                _base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
        elif unit in ['days', 'hours', 'minutes', 'seconds']:
            # Sub-month units map directly onto timedelta arithmetic.
            _base = _base + datetime.timedelta(**{unit: _val})
    return _base
def _datetime_to_date(arg):
    """Parse *arg* and strip any time component, yielding a plain date.

    :param arg: string, date or datetime
    :return: datetime.date
    """
    parsed = parse(arg)
    return parsed.date() if isinstance(parsed, datetime.datetime) else parsed
# Monday to Monday -> 00:00:00 to 00:00:00 month 1st - next month 1st
def this_week(arg=_date, clean=False):
    """Return (Monday, end) of the week containing *arg*.

    ``end`` is the week's Sunday when *clean* is true, otherwise the
    following Monday (half-open range).
    """
    day = _datetime_to_date(arg)
    monday = day - datetime.timedelta(days=day.weekday())
    sunday = day + datetime.timedelta(days=6 - day.weekday())
    return monday, sunday if clean else sunday + _ONE_DAY
def last_week(arg=_date, clean=False):
    """Return the previous week's range, shifted back 7 days from this_week."""
    start, end = this_week(arg)
    if clean:
        return start - _SEVEN_DAYS, end - _SEVEN_DAYS
    return start - _SEVEN_DAYS, end - _SEVEN_DAYS + _ONE_DAY
def next_week(arg=_date, clean=False):
    """Return the following week's range, shifted forward 7 days from this_week."""
    start, end = this_week(arg)
    if clean:
        return start + _SEVEN_DAYS, end + _SEVEN_DAYS
    return start + _SEVEN_DAYS, end + _SEVEN_DAYS + _ONE_DAY
def this_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return datetime.date(_arg.year, _arg.month, 1), last_day(_arg.year, _arg.month) if clean \
else last_day(_arg.year, _arg.month) + _ONE_DAY
def last_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_first_day = datetime.date(_arg.year, _arg.month, 1)
last_month_last_day = this_month_first_day - _ONE_DAY
last_month_first_day = datetime.date(last_month_last_day.year, last_month_last_day.month, 1)
return last_month_first_day, last_month_last_day if clean else this_month_first_day
def next_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_last_day = last_day(_arg.year, _arg.month)
next_month_first_day = this_month_last_day + _ONE_DAY
next_month_last_day = last_day(next_month_first_day.year, next_month_first_day.month)
return next_month_first_day, next_month_last_day if clean else next_month_last_day + _ONE_DAY
######################
# festival
######################
def new_year(year=None):
return datetime.date(int(year), 1, 1) if year else datetime.date(_year, 1, 1)
def valentine(year=None):
return datetime.date(int(year), 2, 14) if year else datetime.date(_year, 2, 14)
def fool(year=None):
return datetime.date(int(year), 4, 1) if year else datetime.date(_year, 4, 1)
def christmas(year=None):
return datetime.date(int(year), 12, 25) if year else datetime.date(_year, 12, 25)
def christ_eve(year=None):
return yesterday(christmas(year))
def mother(year=None):
"""
the 2nd Sunday in May
:param year: int
:return: Mother's day
"""
may_first = datetime.date(_year, 5, 1) if not year else datetime.date(int(year), 5, 1)
weekday_seq = may_first.weekday()
return datetime.date(may_first.year, 5, (14 - weekday_seq))
def father(year=None):
"""
the 3rd Sunday in June
:param year: int
:return: Father's day
"""
june_first = datetime.date(_year, 6, 1) if not year else datetime.date(int(year), 6, 1)
weekday_seq = june_first.weekday()
return datetime.date(june_first.year, 6, (21 - weekday_seq))
def halloween(year=None):
return last_day(month=10) if not year else last_day(year, 10)
def easter(year=None):
"""
1900 - 2099 limit
:param year: int
:return: Easter day
"""
y = int(year) if year else _year
n = y - 1900
a = n % 19
q = n // 4
b = (7 * a + 1) // 19
m = (11 * a + 4 - b) % 29
w = (n + q + 31 - m) % 7
d = 25 - m - w
if d > 0:
return datetime.date(y, 4, d)
else:
return datetime.date(y, 3, (31 + d))
def thanks(year=None):
"""
4rd Thursday in Nov
:param year: int
:return: Thanksgiving Day
"""
nov_first = datetime.date(_year, 11, 1) if not year else datetime.date(int(year), 11, 1)
weekday_seq = nov_first.weekday()
if weekday_seq > 3:
current_day = 32 - weekday_seq
else:
current_day = 25 - weekday_seq
return datetime.date(nov_first.year, 11, current_day)
if __name__ == '__main__':
# _time_filter('2015-01-03')
# print(calendar.monthrange(2015, 10))
print(bp('2015-01-03'))
|
shinux/PyTime
|
pytime/pytime.py
|
before
|
python
|
def before(base=_datetime, diff=None):
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
if not diff:
return _base
result_dict = dp(diff)
# weeks already convert to days in diff_parse function(dp)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year - _val))
elif unit == 'months':
if _base.month <= _val:
_month_diff = 12 - (_val - _base.month)
_base = _base.replace(year=_base.year - 1).replace(month=_month_diff)
else:
_base = _base.replace(month=_base.month - _val)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base - datetime.timedelta(**{unit: _val})
return _base
|
count datetime before `base` time
:param base: minuend -> str/datetime/date
:param diff: str
:return: datetime
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/pytime.py#L123-L151
|
[
"def parse(value):\n return bp(value)\n",
"def parse_diff(base_str):\n \"\"\"\n parse string to regular timedelta\n :param base_str: str\n :return: dict\n \"\"\"\n temp_dict = {'years': 0,\n 'months': 0,\n 'weeks': 0,\n 'days': 0,\n 'hours': 0,\n 'minutes': 0,\n 'seconds': 0}\n\n _pure_str = re.findall(\"[a-zA-Z]+\", base_str)\n pure_num = [int(_) for _ in re.findall(r'\\d+', base_str)]\n pure_str = [filter_unit(_) for _ in _pure_str]\n result_dict = dict(chain(temp_dict.items(), dict(zip(pure_str, pure_num)).items()))\n if result_dict['months'] >= 12:\n advance = result_dict['months'] // 12\n remain = result_dict['months'] % 12\n result_dict['years'] += advance\n result_dict['months'] = remain\n if result_dict['weeks']:\n result_dict['days'] += result_dict['weeks'] * 7\n return result_dict\n"
] |
#!/usr/bin/env python
# encoding: utf-8
"""
pytime
~~~~~~~~~~~~~
A easy-use module to solve the datetime needs by string.
:copyright: (c) 2015 by Sinux <nsinux@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import datetime
import calendar
from .filter import BaseParser
bp = BaseParser.main
dp = BaseParser.parse_diff
def parse(value):
return bp(value)
def count(value1, value2):
_val1, _val2 = parse(value1), parse(value2)
if type(_val1) == type(_val2):
return _val1 - _val2
else:
_val1 = _val1 if isinstance(_val1, datetime.datetime) else midnight(_val1)
_val2 = _val2 if isinstance(_val2, datetime.datetime) else midnight(_val2)
return _val1 - _val2
# max, min
_date = datetime.date.today()
_datetime = datetime.datetime.now()
_year = _date.year
_month = _date.month
_day = _date.day
_SEVEN_DAYS = datetime.timedelta(days=7)
_ONE_DAY = datetime.timedelta(days=1)
def today(year=None):
"""this day, last year"""
return datetime.date(int(year), _date.month, _date.day) if year else _date
def tomorrow(date=None):
"""tomorrow is another day"""
if not date:
return _date + datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date + datetime.timedelta(days=1)
def yesterday(date=None):
"""yesterday once more"""
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date - datetime.timedelta(days=1)
########################
# function method
########################
def days_range(first=None, second=None, wipe=False):
"""
get all days between first and second
:param first: datetime, date or string
:param second: datetime, date or string
:param wipe: boolean, excludes first and last date from range when True. Default is False.
:return: list
"""
_first, _second = parse(first), parse(second)
(_start, _end) = (_second, _first) if _first > _second else (_first, _second)
days_between = (_end - _start).days
date_list = [_end - datetime.timedelta(days=x) for x in range(0, days_between + 1)]
if wipe and len(date_list) >= 2:
date_list = date_list[1:-1]
return date_list
def last_day(year=_year, month=_month):
"""
get the current month's last day
:param year: default to current year
:param month: default to current month
:return: month's last day
"""
last_day = calendar.monthrange(year, month)[1]
return datetime.date(year=year, month=month, day=last_day)
def midnight(arg=None):
    """Return the 00:00:00 datetime for *arg* (default: today).

    :param arg: string or date/datetime
    :return: datetime at 00:00:00
    """
    floor = datetime.datetime.min.time()
    if not arg:
        return datetime.datetime.combine(_date, floor)
    parsed = parse(arg)
    if isinstance(parsed, datetime.datetime):
        return datetime.datetime.combine(parsed.date(), floor)
    if isinstance(parsed, datetime.date):
        return datetime.datetime.combine(parsed, floor)
def after(base=_datetime, diff=None):
"""
count datetime after diff args
:param base: str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
result_dict = dp(diff)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year + _val))
elif unit == 'months':
if _base.month + _val <= 12:
_base = _base.replace(month=_base.month + _val)
else:
_month_diff = (_base.month + _val) - 12
_base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base + datetime.timedelta(**{unit: _val})
return _base
def _datetime_to_date(arg):
"""
convert datetime/str to date
:param arg:
:return:
"""
_arg = parse(arg)
if isinstance(_arg, datetime.datetime):
_arg = _arg.date()
return _arg
# Monday to Monday -> 00:00:00 to 00:00:00 month 1st - next month 1st
def this_week(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return _arg - datetime.timedelta(days=_arg.weekday()), _arg + datetime.timedelta(
days=6 - _arg.weekday()) if clean else _arg + datetime.timedelta(days=6 - _arg.weekday()) + _ONE_DAY
def last_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] - _SEVEN_DAYS, this_week_tuple[1] - _SEVEN_DAYS if clean \
else this_week_tuple[1] - _SEVEN_DAYS + _ONE_DAY
def next_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] + _SEVEN_DAYS, this_week_tuple[1] + _SEVEN_DAYS if clean \
else this_week_tuple[1] + _SEVEN_DAYS + _ONE_DAY
def this_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return datetime.date(_arg.year, _arg.month, 1), last_day(_arg.year, _arg.month) if clean \
else last_day(_arg.year, _arg.month) + _ONE_DAY
def last_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_first_day = datetime.date(_arg.year, _arg.month, 1)
last_month_last_day = this_month_first_day - _ONE_DAY
last_month_first_day = datetime.date(last_month_last_day.year, last_month_last_day.month, 1)
return last_month_first_day, last_month_last_day if clean else this_month_first_day
def next_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_last_day = last_day(_arg.year, _arg.month)
next_month_first_day = this_month_last_day + _ONE_DAY
next_month_last_day = last_day(next_month_first_day.year, next_month_first_day.month)
return next_month_first_day, next_month_last_day if clean else next_month_last_day + _ONE_DAY
######################
# festival
######################
def new_year(year=None):
return datetime.date(int(year), 1, 1) if year else datetime.date(_year, 1, 1)
def valentine(year=None):
return datetime.date(int(year), 2, 14) if year else datetime.date(_year, 2, 14)
def fool(year=None):
return datetime.date(int(year), 4, 1) if year else datetime.date(_year, 4, 1)
def christmas(year=None):
return datetime.date(int(year), 12, 25) if year else datetime.date(_year, 12, 25)
def christ_eve(year=None):
return yesterday(christmas(year))
def mother(year=None):
"""
the 2nd Sunday in May
:param year: int
:return: Mother's day
"""
may_first = datetime.date(_year, 5, 1) if not year else datetime.date(int(year), 5, 1)
weekday_seq = may_first.weekday()
return datetime.date(may_first.year, 5, (14 - weekday_seq))
def father(year=None):
"""
the 3rd Sunday in June
:param year: int
:return: Father's day
"""
june_first = datetime.date(_year, 6, 1) if not year else datetime.date(int(year), 6, 1)
weekday_seq = june_first.weekday()
return datetime.date(june_first.year, 6, (21 - weekday_seq))
def halloween(year=None):
return last_day(month=10) if not year else last_day(year, 10)
def easter(year=None):
"""
1900 - 2099 limit
:param year: int
:return: Easter day
"""
y = int(year) if year else _year
n = y - 1900
a = n % 19
q = n // 4
b = (7 * a + 1) // 19
m = (11 * a + 4 - b) % 29
w = (n + q + 31 - m) % 7
d = 25 - m - w
if d > 0:
return datetime.date(y, 4, d)
else:
return datetime.date(y, 3, (31 + d))
def thanks(year=None):
"""
4rd Thursday in Nov
:param year: int
:return: Thanksgiving Day
"""
nov_first = datetime.date(_year, 11, 1) if not year else datetime.date(int(year), 11, 1)
weekday_seq = nov_first.weekday()
if weekday_seq > 3:
current_day = 32 - weekday_seq
else:
current_day = 25 - weekday_seq
return datetime.date(nov_first.year, 11, current_day)
if __name__ == '__main__':
# _time_filter('2015-01-03')
# print(calendar.monthrange(2015, 10))
print(bp('2015-01-03'))
|
shinux/PyTime
|
pytime/pytime.py
|
_datetime_to_date
|
python
|
def _datetime_to_date(arg):
_arg = parse(arg)
if isinstance(_arg, datetime.datetime):
_arg = _arg.date()
return _arg
|
convert datetime/str to date
:param arg:
:return:
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/pytime.py#L182-L191
|
[
"def parse(value):\n return bp(value)\n"
] |
#!/usr/bin/env python
# encoding: utf-8
"""
pytime
~~~~~~~~~~~~~
A easy-use module to solve the datetime needs by string.
:copyright: (c) 2015 by Sinux <nsinux@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import datetime
import calendar
from .filter import BaseParser
bp = BaseParser.main
dp = BaseParser.parse_diff
def parse(value):
return bp(value)
def count(value1, value2):
_val1, _val2 = parse(value1), parse(value2)
if type(_val1) == type(_val2):
return _val1 - _val2
else:
_val1 = _val1 if isinstance(_val1, datetime.datetime) else midnight(_val1)
_val2 = _val2 if isinstance(_val2, datetime.datetime) else midnight(_val2)
return _val1 - _val2
# max, min
_date = datetime.date.today()
_datetime = datetime.datetime.now()
_year = _date.year
_month = _date.month
_day = _date.day
_SEVEN_DAYS = datetime.timedelta(days=7)
_ONE_DAY = datetime.timedelta(days=1)
def today(year=None):
"""this day, last year"""
return datetime.date(int(year), _date.month, _date.day) if year else _date
def tomorrow(date=None):
"""tomorrow is another day"""
if not date:
return _date + datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date + datetime.timedelta(days=1)
def yesterday(date=None):
"""yesterday once more"""
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date - datetime.timedelta(days=1)
########################
# function method
########################
def days_range(first=None, second=None, wipe=False):
"""
get all days between first and second
:param first: datetime, date or string
:param second: datetime, date or string
:param wipe: boolean, excludes first and last date from range when True. Default is False.
:return: list
"""
_first, _second = parse(first), parse(second)
(_start, _end) = (_second, _first) if _first > _second else (_first, _second)
days_between = (_end - _start).days
date_list = [_end - datetime.timedelta(days=x) for x in range(0, days_between + 1)]
if wipe and len(date_list) >= 2:
date_list = date_list[1:-1]
return date_list
def last_day(year=_year, month=_month):
"""
get the current month's last day
:param year: default to current year
:param month: default to current month
:return: month's last day
"""
last_day = calendar.monthrange(year, month)[1]
return datetime.date(year=year, month=month, day=last_day)
def midnight(arg=None):
"""
convert date to datetime as midnight or get current day's midnight
:param arg: string or date/datetime
:return: datetime at 00:00:00
"""
if arg:
_arg = parse(arg)
if isinstance(_arg, datetime.date):
return datetime.datetime.combine(_arg, datetime.datetime.min.time())
elif isinstance(_arg, datetime.datetime):
return datetime.datetime.combine(_arg.date(), datetime.datetime.min.time())
else:
return datetime.datetime.combine(_date, datetime.datetime.min.time())
def before(base=_datetime, diff=None):
"""
count datetime before `base` time
:param base: minuend -> str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
if not diff:
return _base
result_dict = dp(diff)
# weeks already convert to days in diff_parse function(dp)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year - _val))
elif unit == 'months':
if _base.month <= _val:
_month_diff = 12 - (_val - _base.month)
_base = _base.replace(year=_base.year - 1).replace(month=_month_diff)
else:
_base = _base.replace(month=_base.month - _val)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base - datetime.timedelta(**{unit: _val})
return _base
def after(base=_datetime, diff=None):
"""
count datetime after diff args
:param base: str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
result_dict = dp(diff)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year + _val))
elif unit == 'months':
if _base.month + _val <= 12:
_base = _base.replace(month=_base.month + _val)
else:
_month_diff = (_base.month + _val) - 12
_base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base + datetime.timedelta(**{unit: _val})
return _base
# Monday to Monday -> 00:00:00 to 00:00:00 month 1st - next month 1st
def this_week(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return _arg - datetime.timedelta(days=_arg.weekday()), _arg + datetime.timedelta(
days=6 - _arg.weekday()) if clean else _arg + datetime.timedelta(days=6 - _arg.weekday()) + _ONE_DAY
def last_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] - _SEVEN_DAYS, this_week_tuple[1] - _SEVEN_DAYS if clean \
else this_week_tuple[1] - _SEVEN_DAYS + _ONE_DAY
def next_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] + _SEVEN_DAYS, this_week_tuple[1] + _SEVEN_DAYS if clean \
else this_week_tuple[1] + _SEVEN_DAYS + _ONE_DAY
def this_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return datetime.date(_arg.year, _arg.month, 1), last_day(_arg.year, _arg.month) if clean \
else last_day(_arg.year, _arg.month) + _ONE_DAY
def last_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_first_day = datetime.date(_arg.year, _arg.month, 1)
last_month_last_day = this_month_first_day - _ONE_DAY
last_month_first_day = datetime.date(last_month_last_day.year, last_month_last_day.month, 1)
return last_month_first_day, last_month_last_day if clean else this_month_first_day
def next_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_last_day = last_day(_arg.year, _arg.month)
next_month_first_day = this_month_last_day + _ONE_DAY
next_month_last_day = last_day(next_month_first_day.year, next_month_first_day.month)
return next_month_first_day, next_month_last_day if clean else next_month_last_day + _ONE_DAY
######################
# festival
######################
def new_year(year=None):
return datetime.date(int(year), 1, 1) if year else datetime.date(_year, 1, 1)
def valentine(year=None):
return datetime.date(int(year), 2, 14) if year else datetime.date(_year, 2, 14)
def fool(year=None):
return datetime.date(int(year), 4, 1) if year else datetime.date(_year, 4, 1)
def christmas(year=None):
return datetime.date(int(year), 12, 25) if year else datetime.date(_year, 12, 25)
def christ_eve(year=None):
return yesterday(christmas(year))
def mother(year=None):
"""
the 2nd Sunday in May
:param year: int
:return: Mother's day
"""
may_first = datetime.date(_year, 5, 1) if not year else datetime.date(int(year), 5, 1)
weekday_seq = may_first.weekday()
return datetime.date(may_first.year, 5, (14 - weekday_seq))
def father(year=None):
"""
the 3rd Sunday in June
:param year: int
:return: Father's day
"""
june_first = datetime.date(_year, 6, 1) if not year else datetime.date(int(year), 6, 1)
weekday_seq = june_first.weekday()
return datetime.date(june_first.year, 6, (21 - weekday_seq))
def halloween(year=None):
return last_day(month=10) if not year else last_day(year, 10)
def easter(year=None):
"""
1900 - 2099 limit
:param year: int
:return: Easter day
"""
y = int(year) if year else _year
n = y - 1900
a = n % 19
q = n // 4
b = (7 * a + 1) // 19
m = (11 * a + 4 - b) % 29
w = (n + q + 31 - m) % 7
d = 25 - m - w
if d > 0:
return datetime.date(y, 4, d)
else:
return datetime.date(y, 3, (31 + d))
def thanks(year=None):
"""
4rd Thursday in Nov
:param year: int
:return: Thanksgiving Day
"""
nov_first = datetime.date(_year, 11, 1) if not year else datetime.date(int(year), 11, 1)
weekday_seq = nov_first.weekday()
if weekday_seq > 3:
current_day = 32 - weekday_seq
else:
current_day = 25 - weekday_seq
return datetime.date(nov_first.year, 11, current_day)
if __name__ == '__main__':
# _time_filter('2015-01-03')
# print(calendar.monthrange(2015, 10))
print(bp('2015-01-03'))
|
shinux/PyTime
|
pytime/pytime.py
|
mother
|
python
|
def mother(year=None):
may_first = datetime.date(_year, 5, 1) if not year else datetime.date(int(year), 5, 1)
weekday_seq = may_first.weekday()
return datetime.date(may_first.year, 5, (14 - weekday_seq))
|
the 2nd Sunday in May
:param year: int
:return: Mother's day
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/pytime.py#L260-L268
| null |
#!/usr/bin/env python
# encoding: utf-8
"""
pytime
~~~~~~~~~~~~~
A easy-use module to solve the datetime needs by string.
:copyright: (c) 2015 by Sinux <nsinux@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import datetime
import calendar
from .filter import BaseParser
bp = BaseParser.main
dp = BaseParser.parse_diff
def parse(value):
return bp(value)
def count(value1, value2):
_val1, _val2 = parse(value1), parse(value2)
if type(_val1) == type(_val2):
return _val1 - _val2
else:
_val1 = _val1 if isinstance(_val1, datetime.datetime) else midnight(_val1)
_val2 = _val2 if isinstance(_val2, datetime.datetime) else midnight(_val2)
return _val1 - _val2
# max, min
_date = datetime.date.today()
_datetime = datetime.datetime.now()
_year = _date.year
_month = _date.month
_day = _date.day
_SEVEN_DAYS = datetime.timedelta(days=7)
_ONE_DAY = datetime.timedelta(days=1)
def today(year=None):
"""this day, last year"""
return datetime.date(int(year), _date.month, _date.day) if year else _date
def tomorrow(date=None):
"""tomorrow is another day"""
if not date:
return _date + datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date + datetime.timedelta(days=1)
def yesterday(date=None):
"""yesterday once more"""
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date - datetime.timedelta(days=1)
########################
# function method
########################
def days_range(first=None, second=None, wipe=False):
"""
get all days between first and second
:param first: datetime, date or string
:param second: datetime, date or string
:param wipe: boolean, excludes first and last date from range when True. Default is False.
:return: list
"""
_first, _second = parse(first), parse(second)
(_start, _end) = (_second, _first) if _first > _second else (_first, _second)
days_between = (_end - _start).days
date_list = [_end - datetime.timedelta(days=x) for x in range(0, days_between + 1)]
if wipe and len(date_list) >= 2:
date_list = date_list[1:-1]
return date_list
def last_day(year=_year, month=_month):
"""
get the current month's last day
:param year: default to current year
:param month: default to current month
:return: month's last day
"""
last_day = calendar.monthrange(year, month)[1]
return datetime.date(year=year, month=month, day=last_day)
def midnight(arg=None):
"""
convert date to datetime as midnight or get current day's midnight
:param arg: string or date/datetime
:return: datetime at 00:00:00
"""
if arg:
_arg = parse(arg)
if isinstance(_arg, datetime.date):
return datetime.datetime.combine(_arg, datetime.datetime.min.time())
elif isinstance(_arg, datetime.datetime):
return datetime.datetime.combine(_arg.date(), datetime.datetime.min.time())
else:
return datetime.datetime.combine(_date, datetime.datetime.min.time())
def before(base=_datetime, diff=None):
"""
count datetime before `base` time
:param base: minuend -> str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
if not diff:
return _base
result_dict = dp(diff)
# weeks already convert to days in diff_parse function(dp)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year - _val))
elif unit == 'months':
if _base.month <= _val:
_month_diff = 12 - (_val - _base.month)
_base = _base.replace(year=_base.year - 1).replace(month=_month_diff)
else:
_base = _base.replace(month=_base.month - _val)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base - datetime.timedelta(**{unit: _val})
return _base
def after(base=_datetime, diff=None):
"""
count datetime after diff args
:param base: str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
result_dict = dp(diff)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year + _val))
elif unit == 'months':
if _base.month + _val <= 12:
_base = _base.replace(month=_base.month + _val)
else:
_month_diff = (_base.month + _val) - 12
_base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base + datetime.timedelta(**{unit: _val})
return _base
def _datetime_to_date(arg):
"""
convert datetime/str to date
:param arg:
:return:
"""
_arg = parse(arg)
if isinstance(_arg, datetime.datetime):
_arg = _arg.date()
return _arg
# Monday to Monday -> 00:00:00 to 00:00:00 month 1st - next month 1st
def this_week(arg=_date, clean=False):
    """Return (Monday, end) of the week containing *arg*.

    With clean=True the end is the week's Sunday; otherwise it is the
    following Monday (half-open range convention).
    """
    day = _datetime_to_date(arg)
    monday = day - datetime.timedelta(days=day.weekday())
    sunday = day + datetime.timedelta(days=6 - day.weekday())
    return monday, sunday if clean else sunday + _ONE_DAY
def last_week(arg=_date, clean=False):
    """Return the week range one week before the one containing *arg*."""
    start, end = this_week(arg)
    shifted_end = end - _SEVEN_DAYS
    return start - _SEVEN_DAYS, shifted_end if clean else shifted_end + _ONE_DAY
def next_week(arg=_date, clean=False):
    """Return the week range one week after the one containing *arg*."""
    start, end = this_week(arg)
    shifted_end = end + _SEVEN_DAYS
    return start + _SEVEN_DAYS, shifted_end if clean else shifted_end + _ONE_DAY
def this_month(arg=_date, clean=False):
    """Return (1st of month, end) for the month containing *arg*.

    With clean=True the end is the month's last day; otherwise the first
    day of the next month (half-open range convention).
    """
    day = _datetime_to_date(arg)
    month_start = datetime.date(day.year, day.month, 1)
    month_end = last_day(day.year, day.month)
    return month_start, month_end if clean else month_end + _ONE_DAY
def last_month(arg=_date, clean=False):
    """Return the range of the month before the one containing *arg*."""
    day = _datetime_to_date(arg)
    current_first = datetime.date(day.year, day.month, 1)
    # the day before this month's 1st is the last day of the previous month
    previous_last = current_first - _ONE_DAY
    previous_first = datetime.date(previous_last.year, previous_last.month, 1)
    return previous_first, previous_last if clean else current_first
def next_month(arg=_date, clean=False):
    """Return the range of the month after the one containing *arg*."""
    day = _datetime_to_date(arg)
    # the day after this month's last day is the 1st of the next month
    upcoming_first = last_day(day.year, day.month) + _ONE_DAY
    upcoming_last = last_day(upcoming_first.year, upcoming_first.month)
    return upcoming_first, upcoming_last if clean else upcoming_last + _ONE_DAY
######################
# festival
######################
def new_year(year=None):
    """Return New Year's Day (Jan 1) for *year* (defaults to the current year)."""
    target_year = int(year) if year else _year
    return datetime.date(target_year, 1, 1)
def valentine(year=None):
    """Return Valentine's Day (Feb 14) for *year* (defaults to the current year)."""
    target_year = int(year) if year else _year
    return datetime.date(target_year, 2, 14)
def fool(year=None):
    """Return April Fools' Day (Apr 1) for *year* (defaults to the current year)."""
    target_year = int(year) if year else _year
    return datetime.date(target_year, 4, 1)
def christmas(year=None):
    """Return Christmas Day (Dec 25) for *year* (defaults to the current year)."""
    target_year = int(year) if year else _year
    return datetime.date(target_year, 12, 25)
def christ_eve(year=None):
    # Christmas Eve: the day before Christmas (Dec 24) of the given year.
    return yesterday(christmas(year))
def father(year=None):
    """
    the 3rd Sunday in June
    :param year: int
    :return: Father's day
    """
    target_year = int(year) if year else _year
    # Monday=0 ... Sunday=6: the 3rd Sunday falls on day (21 - weekday of Jun 1)
    first_weekday = datetime.date(target_year, 6, 1).weekday()
    return datetime.date(target_year, 6, 21 - first_weekday)
def halloween(year=None):
    """Return Halloween (Oct 31) for *year* (defaults to the current year).

    Accepts int or numeric-string years, matching the other festival
    helpers (the original passed a string year straight to
    calendar.monthrange, which raises TypeError).
    """
    if year:
        return last_day(int(year), 10)
    return last_day(month=10)
def easter(year=None):
    """
    Compute Easter Sunday with a Gauss-style formula.

    1900 - 2099 limit
    :param year: int
    :return: Easter day
    """
    target_year = int(year) if year else _year
    offset = target_year - 1900
    golden = offset % 19
    leap_quarters = offset // 4
    correction = (7 * golden + 1) // 19
    epact = (11 * golden + 4 - correction) % 29
    weekday_shift = (offset + leap_quarters + 31 - epact) % 7
    day = 25 - epact - weekday_shift
    if day > 0:
        return datetime.date(target_year, 4, day)
    return datetime.date(target_year, 3, 31 + day)
def thanks(year=None):
    """
    4th Thursday in Nov
    :param year: int
    :return: Thanksgiving Day
    """
    first_of_nov = datetime.date(int(year), 11, 1) if year else datetime.date(_year, 11, 1)
    first_weekday = first_of_nov.weekday()
    # Nov 1 falling on Fri-Sun pushes the first Thursday into the next week.
    if first_weekday > 3:
        day = 32 - first_weekday
    else:
        day = 25 - first_weekday
    return datetime.date(first_of_nov.year, 11, day)
if __name__ == '__main__':
# _time_filter('2015-01-03')
# print(calendar.monthrange(2015, 10))
print(bp('2015-01-03'))
|
shinux/PyTime
|
pytime/pytime.py
|
father
|
python
|
def father(year=None):
june_first = datetime.date(_year, 6, 1) if not year else datetime.date(int(year), 6, 1)
weekday_seq = june_first.weekday()
return datetime.date(june_first.year, 6, (21 - weekday_seq))
|
the 3rd Sunday in June
:param year: int
:return: Father's day
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/pytime.py#L271-L279
| null |
#!/usr/bin/env python
# encoding: utf-8
"""
pytime
~~~~~~~~~~~~~
A easy-use module to solve the datetime needs by string.
:copyright: (c) 2015 by Sinux <nsinux@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import datetime
import calendar
from .filter import BaseParser
bp = BaseParser.main
dp = BaseParser.parse_diff
def parse(value):
return bp(value)
def count(value1, value2):
    """Return value1 - value2 as a timedelta, parsing str/date/datetime inputs.

    Mixed date/datetime operands are both promoted to midnight datetimes
    before subtracting.
    """
    first, second = parse(value1), parse(value2)
    if type(first) != type(second):
        first = first if isinstance(first, datetime.datetime) else midnight(first)
        second = second if isinstance(second, datetime.datetime) else midnight(second)
    return first - second
# max, min
_date = datetime.date.today()
_datetime = datetime.datetime.now()
_year = _date.year
_month = _date.month
_day = _date.day
_SEVEN_DAYS = datetime.timedelta(days=7)
_ONE_DAY = datetime.timedelta(days=1)
def today(year=None):
    """Return today's date, optionally projected onto another *year*."""
    if year:
        return datetime.date(int(year), _date.month, _date.day)
    return _date
def tomorrow(date=None):
    """Return the day after *date* (defaults to today)."""
    base = _date if not date else parse(date)
    return base + datetime.timedelta(days=1)
def yesterday(date=None):
    """Return the day before *date* (defaults to today)."""
    base = _date if not date else parse(date)
    return base - datetime.timedelta(days=1)
########################
# function method
########################
def days_range(first=None, second=None, wipe=False):
    """
    get all days between first and second

    :param first: datetime, date or string
    :param second: datetime, date or string
    :param wipe: boolean, excludes first and last date from range when True. Default is False.
    :return: list of dates, newest first
    """
    one, two = parse(first), parse(second)
    start, end = (two, one) if one > two else (one, two)
    span = (end - start).days
    dates = [end - datetime.timedelta(days=offset) for offset in range(span + 1)]
    if wipe and len(dates) >= 2:
        dates = dates[1:-1]
    return dates
def last_day(year=_year, month=_month):
    """
    get the given month's last day

    :param year: default to current year
    :param month: default to current month
    :return: month's last day as a date
    """
    final_day = calendar.monthrange(year, month)[1]
    return datetime.date(year=year, month=month, day=final_day)
def midnight(arg=None):
    """
    convert date to datetime as midnight or get current day's midnight
    :param arg: string or date/datetime
    :return: datetime at 00:00:00
    """
    if arg:
        _arg = parse(arg)
        # datetime is a subclass of date, so it must be checked first;
        # the original order made the datetime branch unreachable dead code.
        # (Outputs are unchanged: combine() ignores the time component.)
        if isinstance(_arg, datetime.datetime):
            return datetime.datetime.combine(_arg.date(), datetime.datetime.min.time())
        if isinstance(_arg, datetime.date):
            return datetime.datetime.combine(_arg, datetime.datetime.min.time())
    else:
        return datetime.datetime.combine(_date, datetime.datetime.min.time())
def before(base=_datetime, diff=None):
"""
count datetime before `base` time
:param base: minuend -> str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
if not diff:
return _base
result_dict = dp(diff)
# weeks already convert to days in diff_parse function(dp)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year - _val))
elif unit == 'months':
if _base.month <= _val:
_month_diff = 12 - (_val - _base.month)
_base = _base.replace(year=_base.year - 1).replace(month=_month_diff)
else:
_base = _base.replace(month=_base.month - _val)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base - datetime.timedelta(**{unit: _val})
return _base
def after(base=_datetime, diff=None):
"""
count datetime after diff args
:param base: str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
result_dict = dp(diff)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year + _val))
elif unit == 'months':
if _base.month + _val <= 12:
_base = _base.replace(month=_base.month + _val)
else:
_month_diff = (_base.month + _val) - 12
_base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base + datetime.timedelta(**{unit: _val})
return _base
def _datetime_to_date(arg):
"""
convert datetime/str to date
:param arg:
:return:
"""
_arg = parse(arg)
if isinstance(_arg, datetime.datetime):
_arg = _arg.date()
return _arg
# Monday to Monday -> 00:00:00 to 00:00:00 month 1st - next month 1st
def this_week(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return _arg - datetime.timedelta(days=_arg.weekday()), _arg + datetime.timedelta(
days=6 - _arg.weekday()) if clean else _arg + datetime.timedelta(days=6 - _arg.weekday()) + _ONE_DAY
def last_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] - _SEVEN_DAYS, this_week_tuple[1] - _SEVEN_DAYS if clean \
else this_week_tuple[1] - _SEVEN_DAYS + _ONE_DAY
def next_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] + _SEVEN_DAYS, this_week_tuple[1] + _SEVEN_DAYS if clean \
else this_week_tuple[1] + _SEVEN_DAYS + _ONE_DAY
def this_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return datetime.date(_arg.year, _arg.month, 1), last_day(_arg.year, _arg.month) if clean \
else last_day(_arg.year, _arg.month) + _ONE_DAY
def last_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_first_day = datetime.date(_arg.year, _arg.month, 1)
last_month_last_day = this_month_first_day - _ONE_DAY
last_month_first_day = datetime.date(last_month_last_day.year, last_month_last_day.month, 1)
return last_month_first_day, last_month_last_day if clean else this_month_first_day
def next_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_last_day = last_day(_arg.year, _arg.month)
next_month_first_day = this_month_last_day + _ONE_DAY
next_month_last_day = last_day(next_month_first_day.year, next_month_first_day.month)
return next_month_first_day, next_month_last_day if clean else next_month_last_day + _ONE_DAY
######################
# festival
######################
def new_year(year=None):
return datetime.date(int(year), 1, 1) if year else datetime.date(_year, 1, 1)
def valentine(year=None):
return datetime.date(int(year), 2, 14) if year else datetime.date(_year, 2, 14)
def fool(year=None):
return datetime.date(int(year), 4, 1) if year else datetime.date(_year, 4, 1)
def christmas(year=None):
return datetime.date(int(year), 12, 25) if year else datetime.date(_year, 12, 25)
def christ_eve(year=None):
return yesterday(christmas(year))
def mother(year=None):
    """
    the 2nd Sunday in May
    :param year: int
    :return: Mother's day
    """
    target_year = int(year) if year else _year
    # Monday=0 ... Sunday=6: the 2nd Sunday falls on day (14 - weekday of May 1)
    first_weekday = datetime.date(target_year, 5, 1).weekday()
    return datetime.date(target_year, 5, 14 - first_weekday)
def halloween(year=None):
return last_day(month=10) if not year else last_day(year, 10)
def easter(year=None):
"""
1900 - 2099 limit
:param year: int
:return: Easter day
"""
y = int(year) if year else _year
n = y - 1900
a = n % 19
q = n // 4
b = (7 * a + 1) // 19
m = (11 * a + 4 - b) % 29
w = (n + q + 31 - m) % 7
d = 25 - m - w
if d > 0:
return datetime.date(y, 4, d)
else:
return datetime.date(y, 3, (31 + d))
def thanks(year=None):
"""
4rd Thursday in Nov
:param year: int
:return: Thanksgiving Day
"""
nov_first = datetime.date(_year, 11, 1) if not year else datetime.date(int(year), 11, 1)
weekday_seq = nov_first.weekday()
if weekday_seq > 3:
current_day = 32 - weekday_seq
else:
current_day = 25 - weekday_seq
return datetime.date(nov_first.year, 11, current_day)
if __name__ == '__main__':
# _time_filter('2015-01-03')
# print(calendar.monthrange(2015, 10))
print(bp('2015-01-03'))
|
shinux/PyTime
|
pytime/pytime.py
|
easter
|
python
|
def easter(year=None):
y = int(year) if year else _year
n = y - 1900
a = n % 19
q = n // 4
b = (7 * a + 1) // 19
m = (11 * a + 4 - b) % 29
w = (n + q + 31 - m) % 7
d = 25 - m - w
if d > 0:
return datetime.date(y, 4, d)
else:
return datetime.date(y, 3, (31 + d))
|
1900 - 2099 limit
:param year: int
:return: Easter day
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/pytime.py#L286-L303
| null |
#!/usr/bin/env python
# encoding: utf-8
"""
pytime
~~~~~~~~~~~~~
A easy-use module to solve the datetime needs by string.
:copyright: (c) 2015 by Sinux <nsinux@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import datetime
import calendar
from .filter import BaseParser
bp = BaseParser.main
dp = BaseParser.parse_diff
def parse(value):
return bp(value)
def count(value1, value2):
_val1, _val2 = parse(value1), parse(value2)
if type(_val1) == type(_val2):
return _val1 - _val2
else:
_val1 = _val1 if isinstance(_val1, datetime.datetime) else midnight(_val1)
_val2 = _val2 if isinstance(_val2, datetime.datetime) else midnight(_val2)
return _val1 - _val2
# max, min
_date = datetime.date.today()
_datetime = datetime.datetime.now()
_year = _date.year
_month = _date.month
_day = _date.day
_SEVEN_DAYS = datetime.timedelta(days=7)
_ONE_DAY = datetime.timedelta(days=1)
def today(year=None):
"""this day, last year"""
return datetime.date(int(year), _date.month, _date.day) if year else _date
def tomorrow(date=None):
"""tomorrow is another day"""
if not date:
return _date + datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date + datetime.timedelta(days=1)
def yesterday(date=None):
"""yesterday once more"""
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date - datetime.timedelta(days=1)
########################
# function method
########################
def days_range(first=None, second=None, wipe=False):
"""
get all days between first and second
:param first: datetime, date or string
:param second: datetime, date or string
:param wipe: boolean, excludes first and last date from range when True. Default is False.
:return: list
"""
_first, _second = parse(first), parse(second)
(_start, _end) = (_second, _first) if _first > _second else (_first, _second)
days_between = (_end - _start).days
date_list = [_end - datetime.timedelta(days=x) for x in range(0, days_between + 1)]
if wipe and len(date_list) >= 2:
date_list = date_list[1:-1]
return date_list
def last_day(year=_year, month=_month):
"""
get the current month's last day
:param year: default to current year
:param month: default to current month
:return: month's last day
"""
last_day = calendar.monthrange(year, month)[1]
return datetime.date(year=year, month=month, day=last_day)
def midnight(arg=None):
"""
convert date to datetime as midnight or get current day's midnight
:param arg: string or date/datetime
:return: datetime at 00:00:00
"""
if arg:
_arg = parse(arg)
if isinstance(_arg, datetime.date):
return datetime.datetime.combine(_arg, datetime.datetime.min.time())
elif isinstance(_arg, datetime.datetime):
return datetime.datetime.combine(_arg.date(), datetime.datetime.min.time())
else:
return datetime.datetime.combine(_date, datetime.datetime.min.time())
def before(base=_datetime, diff=None):
"""
count datetime before `base` time
:param base: minuend -> str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
if not diff:
return _base
result_dict = dp(diff)
# weeks already convert to days in diff_parse function(dp)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year - _val))
elif unit == 'months':
if _base.month <= _val:
_month_diff = 12 - (_val - _base.month)
_base = _base.replace(year=_base.year - 1).replace(month=_month_diff)
else:
_base = _base.replace(month=_base.month - _val)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base - datetime.timedelta(**{unit: _val})
return _base
def after(base=_datetime, diff=None):
"""
count datetime after diff args
:param base: str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
result_dict = dp(diff)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year + _val))
elif unit == 'months':
if _base.month + _val <= 12:
_base = _base.replace(month=_base.month + _val)
else:
_month_diff = (_base.month + _val) - 12
_base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base + datetime.timedelta(**{unit: _val})
return _base
def _datetime_to_date(arg):
"""
convert datetime/str to date
:param arg:
:return:
"""
_arg = parse(arg)
if isinstance(_arg, datetime.datetime):
_arg = _arg.date()
return _arg
# Monday to Monday -> 00:00:00 to 00:00:00 month 1st - next month 1st
def this_week(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return _arg - datetime.timedelta(days=_arg.weekday()), _arg + datetime.timedelta(
days=6 - _arg.weekday()) if clean else _arg + datetime.timedelta(days=6 - _arg.weekday()) + _ONE_DAY
def last_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] - _SEVEN_DAYS, this_week_tuple[1] - _SEVEN_DAYS if clean \
else this_week_tuple[1] - _SEVEN_DAYS + _ONE_DAY
def next_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] + _SEVEN_DAYS, this_week_tuple[1] + _SEVEN_DAYS if clean \
else this_week_tuple[1] + _SEVEN_DAYS + _ONE_DAY
def this_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return datetime.date(_arg.year, _arg.month, 1), last_day(_arg.year, _arg.month) if clean \
else last_day(_arg.year, _arg.month) + _ONE_DAY
def last_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_first_day = datetime.date(_arg.year, _arg.month, 1)
last_month_last_day = this_month_first_day - _ONE_DAY
last_month_first_day = datetime.date(last_month_last_day.year, last_month_last_day.month, 1)
return last_month_first_day, last_month_last_day if clean else this_month_first_day
def next_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_last_day = last_day(_arg.year, _arg.month)
next_month_first_day = this_month_last_day + _ONE_DAY
next_month_last_day = last_day(next_month_first_day.year, next_month_first_day.month)
return next_month_first_day, next_month_last_day if clean else next_month_last_day + _ONE_DAY
######################
# festival
######################
def new_year(year=None):
return datetime.date(int(year), 1, 1) if year else datetime.date(_year, 1, 1)
def valentine(year=None):
return datetime.date(int(year), 2, 14) if year else datetime.date(_year, 2, 14)
def fool(year=None):
return datetime.date(int(year), 4, 1) if year else datetime.date(_year, 4, 1)
def christmas(year=None):
return datetime.date(int(year), 12, 25) if year else datetime.date(_year, 12, 25)
def christ_eve(year=None):
return yesterday(christmas(year))
def mother(year=None):
"""
the 2nd Sunday in May
:param year: int
:return: Mother's day
"""
may_first = datetime.date(_year, 5, 1) if not year else datetime.date(int(year), 5, 1)
weekday_seq = may_first.weekday()
return datetime.date(may_first.year, 5, (14 - weekday_seq))
def father(year=None):
"""
the 3rd Sunday in June
:param year: int
:return: Father's day
"""
june_first = datetime.date(_year, 6, 1) if not year else datetime.date(int(year), 6, 1)
weekday_seq = june_first.weekday()
return datetime.date(june_first.year, 6, (21 - weekday_seq))
def halloween(year=None):
return last_day(month=10) if not year else last_day(year, 10)
def thanks(year=None):
"""
4rd Thursday in Nov
:param year: int
:return: Thanksgiving Day
"""
nov_first = datetime.date(_year, 11, 1) if not year else datetime.date(int(year), 11, 1)
weekday_seq = nov_first.weekday()
if weekday_seq > 3:
current_day = 32 - weekday_seq
else:
current_day = 25 - weekday_seq
return datetime.date(nov_first.year, 11, current_day)
if __name__ == '__main__':
# _time_filter('2015-01-03')
# print(calendar.monthrange(2015, 10))
print(bp('2015-01-03'))
|
shinux/PyTime
|
pytime/pytime.py
|
thanks
|
python
|
def thanks(year=None):
nov_first = datetime.date(_year, 11, 1) if not year else datetime.date(int(year), 11, 1)
weekday_seq = nov_first.weekday()
if weekday_seq > 3:
current_day = 32 - weekday_seq
else:
current_day = 25 - weekday_seq
return datetime.date(nov_first.year, 11, current_day)
|
4rd Thursday in Nov
:param year: int
:return: Thanksgiving Day
|
train
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/pytime.py#L306-L318
| null |
#!/usr/bin/env python
# encoding: utf-8
"""
pytime
~~~~~~~~~~~~~
A easy-use module to solve the datetime needs by string.
:copyright: (c) 2015 by Sinux <nsinux@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import datetime
import calendar
from .filter import BaseParser
bp = BaseParser.main
dp = BaseParser.parse_diff
def parse(value):
return bp(value)
def count(value1, value2):
_val1, _val2 = parse(value1), parse(value2)
if type(_val1) == type(_val2):
return _val1 - _val2
else:
_val1 = _val1 if isinstance(_val1, datetime.datetime) else midnight(_val1)
_val2 = _val2 if isinstance(_val2, datetime.datetime) else midnight(_val2)
return _val1 - _val2
# max, min
_date = datetime.date.today()
_datetime = datetime.datetime.now()
_year = _date.year
_month = _date.month
_day = _date.day
_SEVEN_DAYS = datetime.timedelta(days=7)
_ONE_DAY = datetime.timedelta(days=1)
def today(year=None):
"""this day, last year"""
return datetime.date(int(year), _date.month, _date.day) if year else _date
def tomorrow(date=None):
"""tomorrow is another day"""
if not date:
return _date + datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date + datetime.timedelta(days=1)
def yesterday(date=None):
"""yesterday once more"""
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date - datetime.timedelta(days=1)
########################
# function method
########################
def days_range(first=None, second=None, wipe=False):
"""
get all days between first and second
:param first: datetime, date or string
:param second: datetime, date or string
:param wipe: boolean, excludes first and last date from range when True. Default is False.
:return: list
"""
_first, _second = parse(first), parse(second)
(_start, _end) = (_second, _first) if _first > _second else (_first, _second)
days_between = (_end - _start).days
date_list = [_end - datetime.timedelta(days=x) for x in range(0, days_between + 1)]
if wipe and len(date_list) >= 2:
date_list = date_list[1:-1]
return date_list
def last_day(year=_year, month=_month):
"""
get the current month's last day
:param year: default to current year
:param month: default to current month
:return: month's last day
"""
last_day = calendar.monthrange(year, month)[1]
return datetime.date(year=year, month=month, day=last_day)
def midnight(arg=None):
"""
convert date to datetime as midnight or get current day's midnight
:param arg: string or date/datetime
:return: datetime at 00:00:00
"""
if arg:
_arg = parse(arg)
if isinstance(_arg, datetime.date):
return datetime.datetime.combine(_arg, datetime.datetime.min.time())
elif isinstance(_arg, datetime.datetime):
return datetime.datetime.combine(_arg.date(), datetime.datetime.min.time())
else:
return datetime.datetime.combine(_date, datetime.datetime.min.time())
def before(base=_datetime, diff=None):
"""
count datetime before `base` time
:param base: minuend -> str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
if not diff:
return _base
result_dict = dp(diff)
# weeks already convert to days in diff_parse function(dp)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year - _val))
elif unit == 'months':
if _base.month <= _val:
_month_diff = 12 - (_val - _base.month)
_base = _base.replace(year=_base.year - 1).replace(month=_month_diff)
else:
_base = _base.replace(month=_base.month - _val)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base - datetime.timedelta(**{unit: _val})
return _base
def after(base=_datetime, diff=None):
"""
count datetime after diff args
:param base: str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
result_dict = dp(diff)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year + _val))
elif unit == 'months':
if _base.month + _val <= 12:
_base = _base.replace(month=_base.month + _val)
else:
_month_diff = (_base.month + _val) - 12
_base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base + datetime.timedelta(**{unit: _val})
return _base
def _datetime_to_date(arg):
"""
convert datetime/str to date
:param arg:
:return:
"""
_arg = parse(arg)
if isinstance(_arg, datetime.datetime):
_arg = _arg.date()
return _arg
# Monday to Monday -> 00:00:00 to 00:00:00 month 1st - next month 1st
def this_week(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return _arg - datetime.timedelta(days=_arg.weekday()), _arg + datetime.timedelta(
days=6 - _arg.weekday()) if clean else _arg + datetime.timedelta(days=6 - _arg.weekday()) + _ONE_DAY
def last_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] - _SEVEN_DAYS, this_week_tuple[1] - _SEVEN_DAYS if clean \
else this_week_tuple[1] - _SEVEN_DAYS + _ONE_DAY
def next_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] + _SEVEN_DAYS, this_week_tuple[1] + _SEVEN_DAYS if clean \
else this_week_tuple[1] + _SEVEN_DAYS + _ONE_DAY
def this_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return datetime.date(_arg.year, _arg.month, 1), last_day(_arg.year, _arg.month) if clean \
else last_day(_arg.year, _arg.month) + _ONE_DAY
def last_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_first_day = datetime.date(_arg.year, _arg.month, 1)
last_month_last_day = this_month_first_day - _ONE_DAY
last_month_first_day = datetime.date(last_month_last_day.year, last_month_last_day.month, 1)
return last_month_first_day, last_month_last_day if clean else this_month_first_day
def next_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_last_day = last_day(_arg.year, _arg.month)
next_month_first_day = this_month_last_day + _ONE_DAY
next_month_last_day = last_day(next_month_first_day.year, next_month_first_day.month)
return next_month_first_day, next_month_last_day if clean else next_month_last_day + _ONE_DAY
######################
# festival
######################
def new_year(year=None):
return datetime.date(int(year), 1, 1) if year else datetime.date(_year, 1, 1)
def valentine(year=None):
return datetime.date(int(year), 2, 14) if year else datetime.date(_year, 2, 14)
def fool(year=None):
return datetime.date(int(year), 4, 1) if year else datetime.date(_year, 4, 1)
def christmas(year=None):
return datetime.date(int(year), 12, 25) if year else datetime.date(_year, 12, 25)
def christ_eve(year=None):
return yesterday(christmas(year))
def mother(year=None):
"""
the 2nd Sunday in May
:param year: int
:return: Mother's day
"""
may_first = datetime.date(_year, 5, 1) if not year else datetime.date(int(year), 5, 1)
weekday_seq = may_first.weekday()
return datetime.date(may_first.year, 5, (14 - weekday_seq))
def father(year=None):
"""
the 3rd Sunday in June
:param year: int
:return: Father's day
"""
june_first = datetime.date(_year, 6, 1) if not year else datetime.date(int(year), 6, 1)
weekday_seq = june_first.weekday()
return datetime.date(june_first.year, 6, (21 - weekday_seq))
def halloween(year=None):
return last_day(month=10) if not year else last_day(year, 10)
def easter(year=None):
"""
1900 - 2099 limit
:param year: int
:return: Easter day
"""
y = int(year) if year else _year
n = y - 1900
a = n % 19
q = n // 4
b = (7 * a + 1) // 19
m = (11 * a + 4 - b) % 29
w = (n + q + 31 - m) % 7
d = 25 - m - w
if d > 0:
return datetime.date(y, 4, d)
else:
return datetime.date(y, 3, (31 + d))
if __name__ == '__main__':
# _time_filter('2015-01-03')
# print(calendar.monthrange(2015, 10))
print(bp('2015-01-03'))
|
achiku/jungle
|
jungle/rds.py
|
format_output
|
python
|
def format_output(instances, flag):
out = []
line_format = '{0}\t{1}\t{2}\t{3}'
name_len = _get_max_name_len(instances) + 3
if flag:
line_format = '{0:<' + str(name_len+5) + '}{1:<16}{2:<65}{3:<16}'
for i in instances:
endpoint = "{0}:{1}".format(i['Endpoint']['Address'], i['Endpoint']['Port'])
out.append(
line_format.format(i['DBInstanceIdentifier'], i['DBInstanceStatus'], endpoint, i['Engine']))
return out
|
return formatted string per instance
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/rds.py#L6-L18
|
[
"def _get_max_name_len(instances):\n \"\"\"get max length of Tag:Name\"\"\"\n for i in instances:\n return max([len(i['DBInstanceIdentifier']) for i in instances])\n return 0\n"
] |
# -*- coding: utf-8 -*-
import click
from jungle.session import create_session
def _get_max_name_len(instances):
"""get max length of Tag:Name"""
for i in instances:
return max([len(i['DBInstanceIdentifier']) for i in instances])
return 0
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
    """RDS CLI group"""
    # Stash the profile name on the click context so subcommands can
    # build their boto3 session from it.
    ctx.obj = {'AWS_PROFILE_NAME': profile_name}
@cli.command(help='List RDS instances')
@click.option('--list-formatted', '-l', is_flag=True)
@click.pass_context
def ls(ctx, list_formatted):
    """List RDS instances"""
    # Build a boto3 session from the profile stored on the group context.
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    rds = session.client('rds')
    # describe_db_instances() returns a dict holding a 'DBInstances' list.
    instances = rds.describe_db_instances()
    # -l/--list-formatted switches between tab-separated and aligned columns.
    out = format_output(instances['DBInstances'], list_formatted)
    click.echo('\n'.join(out))
|
achiku/jungle
|
jungle/rds.py
|
ls
|
python
|
def ls(ctx, list_formatted):
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
rds = session.client('rds')
instances = rds.describe_db_instances()
out = format_output(instances['DBInstances'], list_formatted)
click.echo('\n'.join(out))
|
List RDS instances
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/rds.py#L39-L46
|
[
"def create_session(profile_name):\n if profile_name is None:\n return boto3\n else:\n try:\n session = boto3.Session(profile_name=profile_name)\n return session\n except botocore.exceptions.ProfileNotFound as e:\n click.echo(\"Invalid profile name: {0}\".format(profile_name, e), err=True)\n sys.exit(2)\n",
"def format_output(instances, flag):\n \"\"\"return formatted string per instance\"\"\"\n out = []\n line_format = '{0}\\t{1}\\t{2}\\t{3}'\n name_len = _get_max_name_len(instances) + 3\n if flag:\n line_format = '{0:<' + str(name_len+5) + '}{1:<16}{2:<65}{3:<16}'\n\n for i in instances:\n endpoint = \"{0}:{1}\".format(i['Endpoint']['Address'], i['Endpoint']['Port'])\n out.append(\n line_format.format(i['DBInstanceIdentifier'], i['DBInstanceStatus'], endpoint, i['Engine']))\n return out\n"
] |
# -*- coding: utf-8 -*-
import click
from jungle.session import create_session
def format_output(instances, flag):
    """return formatted string per instance

    With flag=True the columns are left-aligned to computed widths;
    otherwise fields are tab-separated.
    """
    lines = []
    name_width = _get_max_name_len(instances) + 3
    if flag:
        template = '{0:<' + str(name_width + 5) + '}{1:<16}{2:<65}{3:<16}'
    else:
        template = '{0}\t{1}\t{2}\t{3}'
    for inst in instances:
        endpoint = "{0}:{1}".format(inst['Endpoint']['Address'], inst['Endpoint']['Port'])
        lines.append(template.format(
            inst['DBInstanceIdentifier'], inst['DBInstanceStatus'], endpoint, inst['Engine']))
    return lines
def _get_max_name_len(instances):
"""get max length of Tag:Name"""
for i in instances:
return max([len(i['DBInstanceIdentifier']) for i in instances])
return 0
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
    """RDS CLI group"""
    # Stash the chosen profile so sub-commands can build a boto3 session from it.
    ctx.obj = dict(AWS_PROFILE_NAME=profile_name)
@cli.command(help='List RDS instances')
@click.option('--list-formatted', '-l', is_flag=True)
@click.pass_context
|
achiku/jungle
|
jungle/emr.py
|
ls
|
python
|
def ls(ctx, name):
    """Print id, name and state for every active EMR cluster."""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    emr = session.client('emr')
    listing = emr.list_clusters(
        ClusterStates=['RUNNING', 'STARTING', 'BOOTSTRAPPING', 'WAITING']
    )
    for cluster in listing['Clusters']:
        click.echo("{0}\t{1}\t{2}".format(
            cluster['Id'], cluster['Name'], cluster['Status']['State']))
|
List EMR instances
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/emr.py#L20-L29
|
[
"def create_session(profile_name):\n if profile_name is None:\n return boto3\n else:\n try:\n session = boto3.Session(profile_name=profile_name)\n return session\n except botocore.exceptions.ProfileNotFound as e:\n click.echo(\"Invalid profile name: {0}\".format(profile_name, e), err=True)\n sys.exit(2)\n"
] |
# -*- coding: utf-8 -*-
import subprocess
import click
from botocore.exceptions import ClientError
from jungle.session import create_session
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
    """EMR CLI group"""
    # Stash the chosen profile so sub-commands can build a boto3 session from it.
    ctx.obj = dict(AWS_PROFILE_NAME=profile_name)
@cli.command(help='List EMR clusters')
@click.argument('name', default='*')
@click.pass_context
@cli.command(help='SSH to EMR master node')
@click.option('--cluster-id', '-i', required=True, help='EMR cluster id')
@click.option('--key-file', '-k', required=True, help='SSH Key file path', type=click.Path())
@click.pass_context
def ssh(ctx, cluster_id, key_file):
    """SSH login to EMR master node"""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    emr = session.client('emr')
    cluster = emr.describe_cluster(ClusterId=cluster_id)
    master_dns = cluster['Cluster']['MasterPublicDnsName']
    options = '-o StrictHostKeyChecking=no -o ServerAliveInterval=10'
    # 'hadoop' is the default login user on an EMR master node.
    command = 'ssh {ssh_options} -i {key_file} hadoop@{target_dns}'.format(
        ssh_options=options, key_file=key_file, target_dns=master_dns)
    subprocess.call(command, shell=True)
@cli.command(help='Terminate a EMR cluster')
@click.option('--cluster-id', '-i', required=True, help='EMR cluster id')
@click.pass_context
def rm(ctx, cluster_id):
    """Terminate a EMR cluster"""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    emr = session.client('emr')
    try:
        cluster = emr.describe_cluster(ClusterId=cluster_id)
        master_dns = cluster['Cluster']['MasterPublicDnsName']
        # Ask for confirmation before an irreversible terminate.
        answer = click.prompt(
            "Are you sure you want to terminate {0}: {1}? [y/Y]".format(
                cluster_id, master_dns), type=str, default='n')
        if answer.lower() == 'y':
            emr.terminate_job_flows(JobFlowIds=[cluster_id])
    except ClientError as e:
        click.echo(e, err=True)
|
achiku/jungle
|
jungle/emr.py
|
ssh
|
python
|
def ssh(ctx, cluster_id, key_file):
    """Open an interactive SSH session to the EMR cluster's master node."""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    emr = session.client('emr')
    cluster = emr.describe_cluster(ClusterId=cluster_id)
    master_dns = cluster['Cluster']['MasterPublicDnsName']
    options = '-o StrictHostKeyChecking=no -o ServerAliveInterval=10'
    command = 'ssh {ssh_options} -i {key_file} hadoop@{target_dns}'.format(
        ssh_options=options, key_file=key_file, target_dns=master_dns)
    subprocess.call(command, shell=True)
|
SSH login to EMR master node
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/emr.py#L36-L46
|
[
"def create_session(profile_name):\n if profile_name is None:\n return boto3\n else:\n try:\n session = boto3.Session(profile_name=profile_name)\n return session\n except botocore.exceptions.ProfileNotFound as e:\n click.echo(\"Invalid profile name: {0}\".format(profile_name, e), err=True)\n sys.exit(2)\n"
] |
# -*- coding: utf-8 -*-
import subprocess
import click
from botocore.exceptions import ClientError
from jungle.session import create_session
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
"""EMR CLI group"""
ctx.obj = {'AWS_PROFILE_NAME': profile_name}
@cli.command(help='List EMR clusters')
@click.argument('name', default='*')
@click.pass_context
def ls(ctx, name):
"""List EMR instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('emr')
results = client.list_clusters(
ClusterStates=['RUNNING', 'STARTING', 'BOOTSTRAPPING', 'WAITING']
)
for cluster in results['Clusters']:
click.echo("{0}\t{1}\t{2}".format(cluster['Id'], cluster['Name'], cluster['Status']['State']))
@cli.command(help='SSH to EMR master node')
@click.option('--cluster-id', '-i', required=True, help='EMR cluster id')
@click.option('--key-file', '-k', required=True, help='SSH Key file path', type=click.Path())
@click.pass_context
@cli.command(help='Terminate a EMR cluster')
@click.option('--cluster-id', '-i', required=True, help='EMR cluster id')
@click.pass_context
def rm(ctx, cluster_id):
"""Terminate a EMR cluster"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('emr')
try:
result = client.describe_cluster(ClusterId=cluster_id)
target_dns = result['Cluster']['MasterPublicDnsName']
flag = click.prompt(
"Are you sure you want to terminate {0}: {1}? [y/Y]".format(
cluster_id, target_dns), type=str, default='n')
if flag.lower() == 'y':
result = client.terminate_job_flows(JobFlowIds=[cluster_id])
except ClientError as e:
click.echo(e, err=True)
|
achiku/jungle
|
jungle/emr.py
|
rm
|
python
|
def rm(ctx, cluster_id):
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('emr')
try:
result = client.describe_cluster(ClusterId=cluster_id)
target_dns = result['Cluster']['MasterPublicDnsName']
flag = click.prompt(
"Are you sure you want to terminate {0}: {1}? [y/Y]".format(
cluster_id, target_dns), type=str, default='n')
if flag.lower() == 'y':
result = client.terminate_job_flows(JobFlowIds=[cluster_id])
except ClientError as e:
click.echo(e, err=True)
|
Terminate a EMR cluster
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/emr.py#L52-L66
|
[
"def create_session(profile_name):\n if profile_name is None:\n return boto3\n else:\n try:\n session = boto3.Session(profile_name=profile_name)\n return session\n except botocore.exceptions.ProfileNotFound as e:\n click.echo(\"Invalid profile name: {0}\".format(profile_name, e), err=True)\n sys.exit(2)\n"
] |
# -*- coding: utf-8 -*-
import subprocess
import click
from botocore.exceptions import ClientError
from jungle.session import create_session
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
"""EMR CLI group"""
ctx.obj = {'AWS_PROFILE_NAME': profile_name}
@cli.command(help='List EMR clusters')
@click.argument('name', default='*')
@click.pass_context
def ls(ctx, name):
"""List EMR instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('emr')
results = client.list_clusters(
ClusterStates=['RUNNING', 'STARTING', 'BOOTSTRAPPING', 'WAITING']
)
for cluster in results['Clusters']:
click.echo("{0}\t{1}\t{2}".format(cluster['Id'], cluster['Name'], cluster['Status']['State']))
@cli.command(help='SSH to EMR master node')
@click.option('--cluster-id', '-i', required=True, help='EMR cluster id')
@click.option('--key-file', '-k', required=True, help='SSH Key file path', type=click.Path())
@click.pass_context
def ssh(ctx, cluster_id, key_file):
"""SSH login to EMR master node"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('emr')
result = client.describe_cluster(ClusterId=cluster_id)
target_dns = result['Cluster']['MasterPublicDnsName']
ssh_options = '-o StrictHostKeyChecking=no -o ServerAliveInterval=10'
cmd = 'ssh {ssh_options} -i {key_file} hadoop@{target_dns}'.format(
ssh_options=ssh_options, key_file=key_file, target_dns=target_dns)
subprocess.call(cmd, shell=True)
@cli.command(help='Terminate a EMR cluster')
@click.option('--cluster-id', '-i', required=True, help='EMR cluster id')
@click.pass_context
|
achiku/jungle
|
jungle/ec2.py
|
format_output
|
python
|
def format_output(instances, flag):
    """Build one display row per EC2 instance; pad columns when *flag* is set."""
    # Width is always probed so iterable consumption matches the original flow.
    width = _get_max_name_len(instances) + 3
    template = '{0}\t{1}\t{2}\t{3}\t{4}'
    if flag:
        template = '{0:<' + str(width) + '}{1:<16}{2:<21}{3:<16}{4:<16}'
    rows = []
    for inst in instances:
        rows.append(template.format(
            get_tag_value(inst.tags, 'Name'), inst.state['Name'], inst.id,
            inst.private_ip_address, str(inst.public_ip_address)))
    return rows
|
return formatted string for instance
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/ec2.py#L10-L22
|
[
"def _get_max_name_len(instances):\n \"\"\"get max length of Tag:Name\"\"\"\n # FIXME: ec2.instanceCollection doesn't have __len__\n for i in instances:\n return max([len(get_tag_value(i.tags, 'Name')) for i in instances])\n return 0\n",
"def get_tag_value(x, key):\n \"\"\"Get a value from tag\"\"\"\n if x is None:\n return ''\n result = [y['Value'] for y in x if y['Key'] == key]\n if result:\n return result[0]\n return ''\n"
] |
# -*- coding: utf-8 -*-
import subprocess
import sys
import botocore
import click
from jungle.session import create_session
def _get_instance_ip_address(instance, use_private_ip=False):
if use_private_ip:
return instance.private_ip_address
elif instance.public_ip_address is not None:
return instance.public_ip_address
else:
click.echo("Public IP address not set. Attempting to use the private IP address.")
return instance.private_ip_address
def _get_max_name_len(instances):
    """Return the longest Tag:Name length, or 0 for an empty collection."""
    # FIXME: ec2.instancesCollection doesn't have __len__, hence the loop probe.
    for _ in instances:
        return max(len(get_tag_value(inst.tags, 'Name')) for inst in instances)
    return 0
def get_tag_value(x, key):
    """Return the value of the tag named *key*; '' when tags are None or key absent."""
    if x is None:
        return ''
    for tag in x:
        if tag['Key'] == key:
            return tag['Value']
    return ''
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
    """EC2 CLI group"""
    # Stash the chosen profile so sub-commands can build a boto3 session from it.
    ctx.obj = dict(AWS_PROFILE_NAME=profile_name)
@cli.command(help='List EC2 instances')
@click.argument('name', default='*')
@click.option('--list-formatted', '-l', is_flag=True)
@click.pass_context
def ls(ctx, name, list_formatted):
    """List EC2 instances"""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    ec2 = session.resource('ec2')
    if name == '*':
        # Wildcard: no server-side filter at all.
        instances = ec2.instances.filter()
    else:
        instances = ec2.instances.filter(
            Filters=[{'Name': 'tag:Name', 'Values': [name]}])
    click.echo('\n'.join(format_output(instances, list_formatted)))
@cli.command(help='Start EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def up(ctx, instance_id):
    """Start EC2 instance"""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    ec2 = session.resource('ec2')
    try:
        ec2.Instance(instance_id).start()
    except botocore.exceptions.ClientError as e:
        click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
        sys.exit(2)
@cli.command(help='Stop EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def down(ctx, instance_id):
    """Stop EC2 instance"""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    ec2 = session.resource('ec2')
    try:
        ec2.Instance(instance_id).stop()
    except botocore.exceptions.ClientError as e:
        click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
        sys.exit(2)
def create_ssh_command(session, instance_id, instance_name, username, key_file, port, ssh_options,
                       use_private_ip, gateway_instance_id, gateway_username):
    """Create SSH Login command string.

    Resolves the target host either from an explicit instance id or by
    looking up the Name tag (prompting interactively when several running
    instances match), then assembles the ssh command line — optionally as a
    two-hop command through a gateway (bastion) instance.
    Exits the process with status 2 on an invalid instance id or selection.
    """
    ec2 = session.resource('ec2')
    if instance_id is not None:
        # Direct lookup by instance id.
        try:
            instance = ec2.Instance(instance_id)
            hostname = _get_instance_ip_address(instance, use_private_ip)
        except botocore.exceptions.ClientError as e:
            click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
            sys.exit(2)
    elif instance_name is not None:
        # Lookup by Name tag, restricted to running instances.
        try:
            conditions = [
                {'Name': 'tag:Name', 'Values': [instance_name]},
                {'Name': 'instance-state-name', 'Values': ['running']},
            ]
            instances = ec2.instances.filter(Filters=conditions)
            target_instances = []
            for idx, i in enumerate(instances):
                target_instances.append(i)
            if len(target_instances) == 1:
                # Unambiguous match: use it directly.
                instance = target_instances[0]
                hostname = _get_instance_ip_address(instance, use_private_ip)
            else:
                # Several candidates: list them and let the user pick one.
                for idx, i in enumerate(instances):
                    tag_name = get_tag_value(i.tags, 'Name')
                    click.echo('[{0}]: {1}\t{2}\t{3}\t{4}\t{5}'.format(
                        idx, i.id, i.public_ip_address, i.state['Name'], tag_name, i.key_name))
                selected_idx = click.prompt("Please enter a valid number", type=int, default=0)
                if len(target_instances) - 1 < selected_idx or selected_idx < 0:
                    click.echo("selected number [{0}] is invalid".format(selected_idx), err=True)
                    sys.exit(2)
                click.echo("{0} is selected.".format(selected_idx))
                instance = target_instances[selected_idx]
                hostname = _get_instance_ip_address(instance, use_private_ip)
        except botocore.exceptions.ClientError as e:
            click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
            sys.exit(2)
    # TODO: need to refactor and make it testable
    # Assemble the option fragments of the final ssh command line.
    if key_file is None:
        key_file_option = ''
    else:
        key_file_option = ' -i {0}'.format(key_file)
    gateway_username_option = build_option_username(gateway_username)
    username_option = build_option_username(username)
    if ssh_options is None:
        ssh_options = ''
    else:
        ssh_options = ' {0}'.format(ssh_options)
    if gateway_instance_id is not None:
        # Two-hop: ssh to the gateway's public IP, then ssh again to the
        # target's private address from there (-tt forces a tty for the hop).
        gateway_instance = ec2.Instance(gateway_instance_id)
        gateway_public_ip = gateway_instance.public_ip_address
        hostname = instance.private_ip_address
        cmd = 'ssh -tt{0} {1}{2} -p {3}{4} ssh{5} {6}'.format(
            gateway_username_option, gateway_public_ip, key_file_option,
            port, ssh_options, username_option, hostname)
    else:
        cmd = 'ssh{0} {1}{2} -p {3}{4}'.format(username_option, hostname, key_file_option, port, ssh_options)
    return cmd
def build_option_username(username):
    """Return an ssh '-l <user>' option fragment, or '' when no username given."""
    return '' if username is None else ' -l {0}'.format(username)
@cli.command(help='SSH login to EC2 instance')
@click.option('--instance-id', '-i', default=None, help='EC2 instance id')
@click.option('--instance-name', '-n', default=None, help='EC2 instance Name Tag')
@click.option('--username', '-u', default=None, help='Login username')
@click.option('--key-file', '-k', help='SSH Key file path', type=click.Path())
@click.option('--port', '-p', help='SSH port', default=22)
@click.option('--private-ip', '-e', help='Use instance private ip', is_flag=True, default=False)
@click.option('--ssh-options', '-s', help='Additional SSH options', default=None)
@click.option('--gateway-instance-id', '-g', default=None, help='Gateway instance id')
@click.option('--gateway-username', '-x', default=None, help='Gateway username')
@click.option('--dry-run', is_flag=True, default=False, help='Print SSH Login command and exist')
@click.pass_context
def ssh(ctx, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
        gateway_instance_id, gateway_username, dry_run):
    """SSH to EC2 instance"""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    # Exactly one of --instance-id / --instance-name must be supplied.
    if instance_id is None and instance_name is None:
        click.echo(
            "One of --instance-id/-i or --instance-name/-n"
            " has to be specified.", err=True)
        sys.exit(1)
    if instance_id is not None and instance_name is not None:
        click.echo(
            "Both --instance-id/-i and --instance-name/-n "
            "can't to be specified at the same time.", err=True)
        sys.exit(1)
    cmd = create_ssh_command(
        session, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
        gateway_instance_id, gateway_username)
    if dry_run:
        click.echo(cmd)
    else:
        subprocess.call(cmd, shell=True)
|
achiku/jungle
|
jungle/ec2.py
|
_get_max_name_len
|
python
|
def _get_max_name_len(instances):
    """Return the longest Tag:Name length, or 0 when the collection is empty."""
    # FIXME: ec2.instancesCollection doesn't have __len__, hence the loop probe.
    for _ in instances:
        return max(len(get_tag_value(inst.tags, 'Name')) for inst in instances)
    return 0
|
get max length of Tag:Name
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/ec2.py#L35-L40
| null |
# -*- coding: utf-8 -*-
import subprocess
import sys
import botocore
import click
from jungle.session import create_session
def format_output(instances, flag):
"""return formatted string for instance"""
out = []
line_format = '{0}\t{1}\t{2}\t{3}\t{4}'
name_len = _get_max_name_len(instances) + 3
if flag:
line_format = '{0:<' + str(name_len) + '}{1:<16}{2:<21}{3:<16}{4:<16}'
for i in instances:
tag_name = get_tag_value(i.tags, 'Name')
out.append(line_format.format(
tag_name, i.state['Name'], i.id, i.private_ip_address, str(i.public_ip_address)))
return out
def _get_instance_ip_address(instance, use_private_ip=False):
if use_private_ip:
return instance.private_ip_address
elif instance.public_ip_address is not None:
return instance.public_ip_address
else:
click.echo("Public IP address not set. Attempting to use the private IP address.")
return instance.private_ip_address
def get_tag_value(x, key):
"""Get a value from tag"""
if x is None:
return ''
result = [y['Value'] for y in x if y['Key'] == key]
if result:
return result[0]
return ''
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
"""EC2 CLI group"""
ctx.obj = {'AWS_PROFILE_NAME': profile_name}
@cli.command(help='List EC2 instances')
@click.argument('name', default='*')
@click.option('--list-formatted', '-l', is_flag=True)
@click.pass_context
def ls(ctx, name, list_formatted):
"""List EC2 instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
if name == '*':
instances = ec2.instances.filter()
else:
condition = {'Name': 'tag:Name', 'Values': [name]}
instances = ec2.instances.filter(Filters=[condition])
out = format_output(instances, list_formatted)
click.echo('\n'.join(out))
@cli.command(help='Start EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def up(ctx, instance_id):
"""Start EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.start()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
@cli.command(help='Stop EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def down(ctx, instance_id):
"""Stop EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.stop()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
def create_ssh_command(session, instance_id, instance_name, username, key_file, port, ssh_options,
use_private_ip, gateway_instance_id, gateway_username):
"""Create SSH Login command string"""
ec2 = session.resource('ec2')
if instance_id is not None:
try:
instance = ec2.Instance(instance_id)
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
elif instance_name is not None:
try:
conditions = [
{'Name': 'tag:Name', 'Values': [instance_name]},
{'Name': 'instance-state-name', 'Values': ['running']},
]
instances = ec2.instances.filter(Filters=conditions)
target_instances = []
for idx, i in enumerate(instances):
target_instances.append(i)
if len(target_instances) == 1:
instance = target_instances[0]
hostname = _get_instance_ip_address(instance, use_private_ip)
else:
for idx, i in enumerate(instances):
tag_name = get_tag_value(i.tags, 'Name')
click.echo('[{0}]: {1}\t{2}\t{3}\t{4}\t{5}'.format(
idx, i.id, i.public_ip_address, i.state['Name'], tag_name, i.key_name))
selected_idx = click.prompt("Please enter a valid number", type=int, default=0)
if len(target_instances) - 1 < selected_idx or selected_idx < 0:
click.echo("selected number [{0}] is invalid".format(selected_idx), err=True)
sys.exit(2)
click.echo("{0} is selected.".format(selected_idx))
instance = target_instances[selected_idx]
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
# TODO: need to refactor and make it testable
if key_file is None:
key_file_option = ''
else:
key_file_option = ' -i {0}'.format(key_file)
gateway_username_option = build_option_username(gateway_username)
username_option = build_option_username(username)
if ssh_options is None:
ssh_options = ''
else:
ssh_options = ' {0}'.format(ssh_options)
if gateway_instance_id is not None:
gateway_instance = ec2.Instance(gateway_instance_id)
gateway_public_ip = gateway_instance.public_ip_address
hostname = instance.private_ip_address
cmd = 'ssh -tt{0} {1}{2} -p {3}{4} ssh{5} {6}'.format(
gateway_username_option, gateway_public_ip, key_file_option,
port, ssh_options, username_option, hostname)
else:
cmd = 'ssh{0} {1}{2} -p {3}{4}'.format(username_option, hostname, key_file_option, port, ssh_options)
return cmd
def build_option_username(username):
if username is None:
return ''
else:
return ' -l {0}'.format(username)
@cli.command(help='SSH login to EC2 instance')
@click.option('--instance-id', '-i', default=None, help='EC2 instance id')
@click.option('--instance-name', '-n', default=None, help='EC2 instance Name Tag')
@click.option('--username', '-u', default=None, help='Login username')
@click.option('--key-file', '-k', help='SSH Key file path', type=click.Path())
@click.option('--port', '-p', help='SSH port', default=22)
@click.option('--private-ip', '-e', help='Use instance private ip', is_flag=True, default=False)
@click.option('--ssh-options', '-s', help='Additional SSH options', default=None)
@click.option('--gateway-instance-id', '-g', default=None, help='Gateway instance id')
@click.option('--gateway-username', '-x', default=None, help='Gateway username')
@click.option('--dry-run', is_flag=True, default=False, help='Print SSH Login command and exist')
@click.pass_context
def ssh(ctx, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username, dry_run):
"""SSH to EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
if instance_id is None and instance_name is None:
click.echo(
"One of --instance-id/-i or --instance-name/-n"
" has to be specified.", err=True)
sys.exit(1)
elif instance_id is not None and instance_name is not None:
click.echo(
"Both --instance-id/-i and --instance-name/-n "
"can't to be specified at the same time.", err=True)
sys.exit(1)
cmd = create_ssh_command(
session, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username)
if not dry_run:
subprocess.call(cmd, shell=True)
else:
click.echo(cmd)
|
achiku/jungle
|
jungle/ec2.py
|
get_tag_value
|
python
|
def get_tag_value(x, key):
    """Return the value of the tag named *key*; '' when tags are None or key absent."""
    if x is None:
        return ''
    for tag in x:
        if tag['Key'] == key:
            return tag['Value']
    return ''
|
Get a value from tag
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/ec2.py#L43-L50
| null |
# -*- coding: utf-8 -*-
import subprocess
import sys
import botocore
import click
from jungle.session import create_session
def format_output(instances, flag):
"""return formatted string for instance"""
out = []
line_format = '{0}\t{1}\t{2}\t{3}\t{4}'
name_len = _get_max_name_len(instances) + 3
if flag:
line_format = '{0:<' + str(name_len) + '}{1:<16}{2:<21}{3:<16}{4:<16}'
for i in instances:
tag_name = get_tag_value(i.tags, 'Name')
out.append(line_format.format(
tag_name, i.state['Name'], i.id, i.private_ip_address, str(i.public_ip_address)))
return out
def _get_instance_ip_address(instance, use_private_ip=False):
if use_private_ip:
return instance.private_ip_address
elif instance.public_ip_address is not None:
return instance.public_ip_address
else:
click.echo("Public IP address not set. Attempting to use the private IP address.")
return instance.private_ip_address
def _get_max_name_len(instances):
"""get max length of Tag:Name"""
# FIXME: ec2.instanceCollection doesn't have __len__
for i in instances:
return max([len(get_tag_value(i.tags, 'Name')) for i in instances])
return 0
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
"""EC2 CLI group"""
ctx.obj = {'AWS_PROFILE_NAME': profile_name}
@cli.command(help='List EC2 instances')
@click.argument('name', default='*')
@click.option('--list-formatted', '-l', is_flag=True)
@click.pass_context
def ls(ctx, name, list_formatted):
"""List EC2 instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
if name == '*':
instances = ec2.instances.filter()
else:
condition = {'Name': 'tag:Name', 'Values': [name]}
instances = ec2.instances.filter(Filters=[condition])
out = format_output(instances, list_formatted)
click.echo('\n'.join(out))
@cli.command(help='Start EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def up(ctx, instance_id):
"""Start EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.start()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
@cli.command(help='Stop EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def down(ctx, instance_id):
"""Stop EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.stop()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
def create_ssh_command(session, instance_id, instance_name, username, key_file, port, ssh_options,
use_private_ip, gateway_instance_id, gateway_username):
"""Create SSH Login command string"""
ec2 = session.resource('ec2')
if instance_id is not None:
try:
instance = ec2.Instance(instance_id)
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
elif instance_name is not None:
try:
conditions = [
{'Name': 'tag:Name', 'Values': [instance_name]},
{'Name': 'instance-state-name', 'Values': ['running']},
]
instances = ec2.instances.filter(Filters=conditions)
target_instances = []
for idx, i in enumerate(instances):
target_instances.append(i)
if len(target_instances) == 1:
instance = target_instances[0]
hostname = _get_instance_ip_address(instance, use_private_ip)
else:
for idx, i in enumerate(instances):
tag_name = get_tag_value(i.tags, 'Name')
click.echo('[{0}]: {1}\t{2}\t{3}\t{4}\t{5}'.format(
idx, i.id, i.public_ip_address, i.state['Name'], tag_name, i.key_name))
selected_idx = click.prompt("Please enter a valid number", type=int, default=0)
if len(target_instances) - 1 < selected_idx or selected_idx < 0:
click.echo("selected number [{0}] is invalid".format(selected_idx), err=True)
sys.exit(2)
click.echo("{0} is selected.".format(selected_idx))
instance = target_instances[selected_idx]
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
# TODO: need to refactor and make it testable
if key_file is None:
key_file_option = ''
else:
key_file_option = ' -i {0}'.format(key_file)
gateway_username_option = build_option_username(gateway_username)
username_option = build_option_username(username)
if ssh_options is None:
ssh_options = ''
else:
ssh_options = ' {0}'.format(ssh_options)
if gateway_instance_id is not None:
gateway_instance = ec2.Instance(gateway_instance_id)
gateway_public_ip = gateway_instance.public_ip_address
hostname = instance.private_ip_address
cmd = 'ssh -tt{0} {1}{2} -p {3}{4} ssh{5} {6}'.format(
gateway_username_option, gateway_public_ip, key_file_option,
port, ssh_options, username_option, hostname)
else:
cmd = 'ssh{0} {1}{2} -p {3}{4}'.format(username_option, hostname, key_file_option, port, ssh_options)
return cmd
def build_option_username(username):
if username is None:
return ''
else:
return ' -l {0}'.format(username)
@cli.command(help='SSH login to EC2 instance')
@click.option('--instance-id', '-i', default=None, help='EC2 instance id')
@click.option('--instance-name', '-n', default=None, help='EC2 instance Name Tag')
@click.option('--username', '-u', default=None, help='Login username')
@click.option('--key-file', '-k', help='SSH Key file path', type=click.Path())
@click.option('--port', '-p', help='SSH port', default=22)
@click.option('--private-ip', '-e', help='Use instance private ip', is_flag=True, default=False)
@click.option('--ssh-options', '-s', help='Additional SSH options', default=None)
@click.option('--gateway-instance-id', '-g', default=None, help='Gateway instance id')
@click.option('--gateway-username', '-x', default=None, help='Gateway username')
@click.option('--dry-run', is_flag=True, default=False, help='Print SSH Login command and exist')
@click.pass_context
def ssh(ctx, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username, dry_run):
"""SSH to EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
if instance_id is None and instance_name is None:
click.echo(
"One of --instance-id/-i or --instance-name/-n"
" has to be specified.", err=True)
sys.exit(1)
elif instance_id is not None and instance_name is not None:
click.echo(
"Both --instance-id/-i and --instance-name/-n "
"can't to be specified at the same time.", err=True)
sys.exit(1)
cmd = create_ssh_command(
session, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username)
if not dry_run:
subprocess.call(cmd, shell=True)
else:
click.echo(cmd)
|
achiku/jungle
|
jungle/ec2.py
|
ls
|
python
|
def ls(ctx, name, list_formatted):
    """Print every EC2 instance whose Name tag matches *name* ('*' = all)."""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    ec2 = session.resource('ec2')
    if name == '*':
        instances = ec2.instances.filter()
    else:
        instances = ec2.instances.filter(
            Filters=[{'Name': 'tag:Name', 'Values': [name]}])
    click.echo('\n'.join(format_output(instances, list_formatted)))
|
List EC2 instances
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/ec2.py#L65-L75
|
[
"def create_session(profile_name):\n if profile_name is None:\n return boto3\n else:\n try:\n session = boto3.Session(profile_name=profile_name)\n return session\n except botocore.exceptions.ProfileNotFound as e:\n click.echo(\"Invalid profile name: {0}\".format(profile_name, e), err=True)\n sys.exit(2)\n",
"def format_output(instances, flag):\n \"\"\"return formatted string for instance\"\"\"\n out = []\n line_format = '{0}\\t{1}\\t{2}\\t{3}\\t{4}'\n name_len = _get_max_name_len(instances) + 3\n if flag:\n line_format = '{0:<' + str(name_len) + '}{1:<16}{2:<21}{3:<16}{4:<16}'\n\n for i in instances:\n tag_name = get_tag_value(i.tags, 'Name')\n out.append(line_format.format(\n tag_name, i.state['Name'], i.id, i.private_ip_address, str(i.public_ip_address)))\n return out\n"
] |
# -*- coding: utf-8 -*-
import subprocess
import sys
import botocore
import click
from jungle.session import create_session
def format_output(instances, flag):
"""return formatted string for instance"""
out = []
line_format = '{0}\t{1}\t{2}\t{3}\t{4}'
name_len = _get_max_name_len(instances) + 3
if flag:
line_format = '{0:<' + str(name_len) + '}{1:<16}{2:<21}{3:<16}{4:<16}'
for i in instances:
tag_name = get_tag_value(i.tags, 'Name')
out.append(line_format.format(
tag_name, i.state['Name'], i.id, i.private_ip_address, str(i.public_ip_address)))
return out
def _get_instance_ip_address(instance, use_private_ip=False):
if use_private_ip:
return instance.private_ip_address
elif instance.public_ip_address is not None:
return instance.public_ip_address
else:
click.echo("Public IP address not set. Attempting to use the private IP address.")
return instance.private_ip_address
def _get_max_name_len(instances):
"""get max length of Tag:Name"""
# FIXME: ec2.instanceCollection doesn't have __len__
for i in instances:
return max([len(get_tag_value(i.tags, 'Name')) for i in instances])
return 0
def get_tag_value(x, key):
"""Get a value from tag"""
if x is None:
return ''
result = [y['Value'] for y in x if y['Key'] == key]
if result:
return result[0]
return ''
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
"""EC2 CLI group"""
ctx.obj = {'AWS_PROFILE_NAME': profile_name}
@cli.command(help='List EC2 instances')
@click.argument('name', default='*')
@click.option('--list-formatted', '-l', is_flag=True)
@click.pass_context
@cli.command(help='Start EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def up(ctx, instance_id):
"""Start EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.start()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
@cli.command(help='Stop EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def down(ctx, instance_id):
"""Stop EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.stop()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
def create_ssh_command(session, instance_id, instance_name, username, key_file, port, ssh_options,
use_private_ip, gateway_instance_id, gateway_username):
"""Create SSH Login command string"""
ec2 = session.resource('ec2')
if instance_id is not None:
try:
instance = ec2.Instance(instance_id)
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
elif instance_name is not None:
try:
conditions = [
{'Name': 'tag:Name', 'Values': [instance_name]},
{'Name': 'instance-state-name', 'Values': ['running']},
]
instances = ec2.instances.filter(Filters=conditions)
target_instances = []
for idx, i in enumerate(instances):
target_instances.append(i)
if len(target_instances) == 1:
instance = target_instances[0]
hostname = _get_instance_ip_address(instance, use_private_ip)
else:
for idx, i in enumerate(instances):
tag_name = get_tag_value(i.tags, 'Name')
click.echo('[{0}]: {1}\t{2}\t{3}\t{4}\t{5}'.format(
idx, i.id, i.public_ip_address, i.state['Name'], tag_name, i.key_name))
selected_idx = click.prompt("Please enter a valid number", type=int, default=0)
if len(target_instances) - 1 < selected_idx or selected_idx < 0:
click.echo("selected number [{0}] is invalid".format(selected_idx), err=True)
sys.exit(2)
click.echo("{0} is selected.".format(selected_idx))
instance = target_instances[selected_idx]
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
# TODO: need to refactor and make it testable
if key_file is None:
key_file_option = ''
else:
key_file_option = ' -i {0}'.format(key_file)
gateway_username_option = build_option_username(gateway_username)
username_option = build_option_username(username)
if ssh_options is None:
ssh_options = ''
else:
ssh_options = ' {0}'.format(ssh_options)
if gateway_instance_id is not None:
gateway_instance = ec2.Instance(gateway_instance_id)
gateway_public_ip = gateway_instance.public_ip_address
hostname = instance.private_ip_address
cmd = 'ssh -tt{0} {1}{2} -p {3}{4} ssh{5} {6}'.format(
gateway_username_option, gateway_public_ip, key_file_option,
port, ssh_options, username_option, hostname)
else:
cmd = 'ssh{0} {1}{2} -p {3}{4}'.format(username_option, hostname, key_file_option, port, ssh_options)
return cmd
def build_option_username(username):
if username is None:
return ''
else:
return ' -l {0}'.format(username)
@cli.command(help='SSH login to EC2 instance')
@click.option('--instance-id', '-i', default=None, help='EC2 instance id')
@click.option('--instance-name', '-n', default=None, help='EC2 instance Name Tag')
@click.option('--username', '-u', default=None, help='Login username')
@click.option('--key-file', '-k', help='SSH Key file path', type=click.Path())
@click.option('--port', '-p', help='SSH port', default=22)
@click.option('--private-ip', '-e', help='Use instance private ip', is_flag=True, default=False)
@click.option('--ssh-options', '-s', help='Additional SSH options', default=None)
@click.option('--gateway-instance-id', '-g', default=None, help='Gateway instance id')
@click.option('--gateway-username', '-x', default=None, help='Gateway username')
@click.option('--dry-run', is_flag=True, default=False, help='Print SSH Login command and exist')
@click.pass_context
def ssh(ctx, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username, dry_run):
"""SSH to EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
if instance_id is None and instance_name is None:
click.echo(
"One of --instance-id/-i or --instance-name/-n"
" has to be specified.", err=True)
sys.exit(1)
elif instance_id is not None and instance_name is not None:
click.echo(
"Both --instance-id/-i and --instance-name/-n "
"can't to be specified at the same time.", err=True)
sys.exit(1)
cmd = create_ssh_command(
session, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username)
if not dry_run:
subprocess.call(cmd, shell=True)
else:
click.echo(cmd)
|
achiku/jungle
|
jungle/ec2.py
|
up
|
python
|
def up(ctx, instance_id):
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.start()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
|
Start EC2 instance
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/ec2.py#L81-L90
|
[
"def create_session(profile_name):\n if profile_name is None:\n return boto3\n else:\n try:\n session = boto3.Session(profile_name=profile_name)\n return session\n except botocore.exceptions.ProfileNotFound as e:\n click.echo(\"Invalid profile name: {0}\".format(profile_name, e), err=True)\n sys.exit(2)\n"
] |
# -*- coding: utf-8 -*-
import subprocess
import sys
import botocore
import click
from jungle.session import create_session
def format_output(instances, flag):
"""return formatted string for instance"""
out = []
line_format = '{0}\t{1}\t{2}\t{3}\t{4}'
name_len = _get_max_name_len(instances) + 3
if flag:
line_format = '{0:<' + str(name_len) + '}{1:<16}{2:<21}{3:<16}{4:<16}'
for i in instances:
tag_name = get_tag_value(i.tags, 'Name')
out.append(line_format.format(
tag_name, i.state['Name'], i.id, i.private_ip_address, str(i.public_ip_address)))
return out
def _get_instance_ip_address(instance, use_private_ip=False):
if use_private_ip:
return instance.private_ip_address
elif instance.public_ip_address is not None:
return instance.public_ip_address
else:
click.echo("Public IP address not set. Attempting to use the private IP address.")
return instance.private_ip_address
def _get_max_name_len(instances):
"""get max length of Tag:Name"""
# FIXME: ec2.instanceCollection doesn't have __len__
for i in instances:
return max([len(get_tag_value(i.tags, 'Name')) for i in instances])
return 0
def get_tag_value(x, key):
"""Get a value from tag"""
if x is None:
return ''
result = [y['Value'] for y in x if y['Key'] == key]
if result:
return result[0]
return ''
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
"""EC2 CLI group"""
ctx.obj = {'AWS_PROFILE_NAME': profile_name}
@cli.command(help='List EC2 instances')
@click.argument('name', default='*')
@click.option('--list-formatted', '-l', is_flag=True)
@click.pass_context
def ls(ctx, name, list_formatted):
"""List EC2 instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
if name == '*':
instances = ec2.instances.filter()
else:
condition = {'Name': 'tag:Name', 'Values': [name]}
instances = ec2.instances.filter(Filters=[condition])
out = format_output(instances, list_formatted)
click.echo('\n'.join(out))
@cli.command(help='Start EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
@cli.command(help='Stop EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def down(ctx, instance_id):
"""Stop EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.stop()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
def create_ssh_command(session, instance_id, instance_name, username, key_file, port, ssh_options,
use_private_ip, gateway_instance_id, gateway_username):
"""Create SSH Login command string"""
ec2 = session.resource('ec2')
if instance_id is not None:
try:
instance = ec2.Instance(instance_id)
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
elif instance_name is not None:
try:
conditions = [
{'Name': 'tag:Name', 'Values': [instance_name]},
{'Name': 'instance-state-name', 'Values': ['running']},
]
instances = ec2.instances.filter(Filters=conditions)
target_instances = []
for idx, i in enumerate(instances):
target_instances.append(i)
if len(target_instances) == 1:
instance = target_instances[0]
hostname = _get_instance_ip_address(instance, use_private_ip)
else:
for idx, i in enumerate(instances):
tag_name = get_tag_value(i.tags, 'Name')
click.echo('[{0}]: {1}\t{2}\t{3}\t{4}\t{5}'.format(
idx, i.id, i.public_ip_address, i.state['Name'], tag_name, i.key_name))
selected_idx = click.prompt("Please enter a valid number", type=int, default=0)
if len(target_instances) - 1 < selected_idx or selected_idx < 0:
click.echo("selected number [{0}] is invalid".format(selected_idx), err=True)
sys.exit(2)
click.echo("{0} is selected.".format(selected_idx))
instance = target_instances[selected_idx]
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
# TODO: need to refactor and make it testable
if key_file is None:
key_file_option = ''
else:
key_file_option = ' -i {0}'.format(key_file)
gateway_username_option = build_option_username(gateway_username)
username_option = build_option_username(username)
if ssh_options is None:
ssh_options = ''
else:
ssh_options = ' {0}'.format(ssh_options)
if gateway_instance_id is not None:
gateway_instance = ec2.Instance(gateway_instance_id)
gateway_public_ip = gateway_instance.public_ip_address
hostname = instance.private_ip_address
cmd = 'ssh -tt{0} {1}{2} -p {3}{4} ssh{5} {6}'.format(
gateway_username_option, gateway_public_ip, key_file_option,
port, ssh_options, username_option, hostname)
else:
cmd = 'ssh{0} {1}{2} -p {3}{4}'.format(username_option, hostname, key_file_option, port, ssh_options)
return cmd
def build_option_username(username):
if username is None:
return ''
else:
return ' -l {0}'.format(username)
@cli.command(help='SSH login to EC2 instance')
@click.option('--instance-id', '-i', default=None, help='EC2 instance id')
@click.option('--instance-name', '-n', default=None, help='EC2 instance Name Tag')
@click.option('--username', '-u', default=None, help='Login username')
@click.option('--key-file', '-k', help='SSH Key file path', type=click.Path())
@click.option('--port', '-p', help='SSH port', default=22)
@click.option('--private-ip', '-e', help='Use instance private ip', is_flag=True, default=False)
@click.option('--ssh-options', '-s', help='Additional SSH options', default=None)
@click.option('--gateway-instance-id', '-g', default=None, help='Gateway instance id')
@click.option('--gateway-username', '-x', default=None, help='Gateway username')
@click.option('--dry-run', is_flag=True, default=False, help='Print SSH Login command and exist')
@click.pass_context
def ssh(ctx, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username, dry_run):
"""SSH to EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
if instance_id is None and instance_name is None:
click.echo(
"One of --instance-id/-i or --instance-name/-n"
" has to be specified.", err=True)
sys.exit(1)
elif instance_id is not None and instance_name is not None:
click.echo(
"Both --instance-id/-i and --instance-name/-n "
"can't to be specified at the same time.", err=True)
sys.exit(1)
cmd = create_ssh_command(
session, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username)
if not dry_run:
subprocess.call(cmd, shell=True)
else:
click.echo(cmd)
|
achiku/jungle
|
jungle/ec2.py
|
create_ssh_command
|
python
|
def create_ssh_command(session, instance_id, instance_name, username, key_file, port, ssh_options,
use_private_ip, gateway_instance_id, gateway_username):
ec2 = session.resource('ec2')
if instance_id is not None:
try:
instance = ec2.Instance(instance_id)
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
elif instance_name is not None:
try:
conditions = [
{'Name': 'tag:Name', 'Values': [instance_name]},
{'Name': 'instance-state-name', 'Values': ['running']},
]
instances = ec2.instances.filter(Filters=conditions)
target_instances = []
for idx, i in enumerate(instances):
target_instances.append(i)
if len(target_instances) == 1:
instance = target_instances[0]
hostname = _get_instance_ip_address(instance, use_private_ip)
else:
for idx, i in enumerate(instances):
tag_name = get_tag_value(i.tags, 'Name')
click.echo('[{0}]: {1}\t{2}\t{3}\t{4}\t{5}'.format(
idx, i.id, i.public_ip_address, i.state['Name'], tag_name, i.key_name))
selected_idx = click.prompt("Please enter a valid number", type=int, default=0)
if len(target_instances) - 1 < selected_idx or selected_idx < 0:
click.echo("selected number [{0}] is invalid".format(selected_idx), err=True)
sys.exit(2)
click.echo("{0} is selected.".format(selected_idx))
instance = target_instances[selected_idx]
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
# TODO: need to refactor and make it testable
if key_file is None:
key_file_option = ''
else:
key_file_option = ' -i {0}'.format(key_file)
gateway_username_option = build_option_username(gateway_username)
username_option = build_option_username(username)
if ssh_options is None:
ssh_options = ''
else:
ssh_options = ' {0}'.format(ssh_options)
if gateway_instance_id is not None:
gateway_instance = ec2.Instance(gateway_instance_id)
gateway_public_ip = gateway_instance.public_ip_address
hostname = instance.private_ip_address
cmd = 'ssh -tt{0} {1}{2} -p {3}{4} ssh{5} {6}'.format(
gateway_username_option, gateway_public_ip, key_file_option,
port, ssh_options, username_option, hostname)
else:
cmd = 'ssh{0} {1}{2} -p {3}{4}'.format(username_option, hostname, key_file_option, port, ssh_options)
return cmd
|
Create SSH Login command string
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/ec2.py#L108-L169
|
[
"def get_tag_value(x, key):\n \"\"\"Get a value from tag\"\"\"\n if x is None:\n return ''\n result = [y['Value'] for y in x if y['Key'] == key]\n if result:\n return result[0]\n return ''\n",
"def _get_instance_ip_address(instance, use_private_ip=False):\n if use_private_ip:\n return instance.private_ip_address\n elif instance.public_ip_address is not None:\n return instance.public_ip_address\n else:\n click.echo(\"Public IP address not set. Attempting to use the private IP address.\")\n return instance.private_ip_address\n",
"def build_option_username(username):\n if username is None:\n return ''\n else:\n return ' -l {0}'.format(username)\n"
] |
# -*- coding: utf-8 -*-
import subprocess
import sys
import botocore
import click
from jungle.session import create_session
def format_output(instances, flag):
"""return formatted string for instance"""
out = []
line_format = '{0}\t{1}\t{2}\t{3}\t{4}'
name_len = _get_max_name_len(instances) + 3
if flag:
line_format = '{0:<' + str(name_len) + '}{1:<16}{2:<21}{3:<16}{4:<16}'
for i in instances:
tag_name = get_tag_value(i.tags, 'Name')
out.append(line_format.format(
tag_name, i.state['Name'], i.id, i.private_ip_address, str(i.public_ip_address)))
return out
def _get_instance_ip_address(instance, use_private_ip=False):
if use_private_ip:
return instance.private_ip_address
elif instance.public_ip_address is not None:
return instance.public_ip_address
else:
click.echo("Public IP address not set. Attempting to use the private IP address.")
return instance.private_ip_address
def _get_max_name_len(instances):
"""get max length of Tag:Name"""
# FIXME: ec2.instanceCollection doesn't have __len__
for i in instances:
return max([len(get_tag_value(i.tags, 'Name')) for i in instances])
return 0
def get_tag_value(x, key):
"""Get a value from tag"""
if x is None:
return ''
result = [y['Value'] for y in x if y['Key'] == key]
if result:
return result[0]
return ''
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
"""EC2 CLI group"""
ctx.obj = {'AWS_PROFILE_NAME': profile_name}
@cli.command(help='List EC2 instances')
@click.argument('name', default='*')
@click.option('--list-formatted', '-l', is_flag=True)
@click.pass_context
def ls(ctx, name, list_formatted):
"""List EC2 instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
if name == '*':
instances = ec2.instances.filter()
else:
condition = {'Name': 'tag:Name', 'Values': [name]}
instances = ec2.instances.filter(Filters=[condition])
out = format_output(instances, list_formatted)
click.echo('\n'.join(out))
@cli.command(help='Start EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def up(ctx, instance_id):
"""Start EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.start()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
@cli.command(help='Stop EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def down(ctx, instance_id):
"""Stop EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.stop()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
def build_option_username(username):
if username is None:
return ''
else:
return ' -l {0}'.format(username)
@cli.command(help='SSH login to EC2 instance')
@click.option('--instance-id', '-i', default=None, help='EC2 instance id')
@click.option('--instance-name', '-n', default=None, help='EC2 instance Name Tag')
@click.option('--username', '-u', default=None, help='Login username')
@click.option('--key-file', '-k', help='SSH Key file path', type=click.Path())
@click.option('--port', '-p', help='SSH port', default=22)
@click.option('--private-ip', '-e', help='Use instance private ip', is_flag=True, default=False)
@click.option('--ssh-options', '-s', help='Additional SSH options', default=None)
@click.option('--gateway-instance-id', '-g', default=None, help='Gateway instance id')
@click.option('--gateway-username', '-x', default=None, help='Gateway username')
@click.option('--dry-run', is_flag=True, default=False, help='Print SSH Login command and exist')
@click.pass_context
def ssh(ctx, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username, dry_run):
"""SSH to EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
if instance_id is None and instance_name is None:
click.echo(
"One of --instance-id/-i or --instance-name/-n"
" has to be specified.", err=True)
sys.exit(1)
elif instance_id is not None and instance_name is not None:
click.echo(
"Both --instance-id/-i and --instance-name/-n "
"can't to be specified at the same time.", err=True)
sys.exit(1)
cmd = create_ssh_command(
session, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username)
if not dry_run:
subprocess.call(cmd, shell=True)
else:
click.echo(cmd)
|
achiku/jungle
|
jungle/ec2.py
|
ssh
|
python
|
def ssh(ctx, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username, dry_run):
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
if instance_id is None and instance_name is None:
click.echo(
"One of --instance-id/-i or --instance-name/-n"
" has to be specified.", err=True)
sys.exit(1)
elif instance_id is not None and instance_name is not None:
click.echo(
"Both --instance-id/-i and --instance-name/-n "
"can't to be specified at the same time.", err=True)
sys.exit(1)
cmd = create_ssh_command(
session, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username)
if not dry_run:
subprocess.call(cmd, shell=True)
else:
click.echo(cmd)
|
SSH to EC2 instance
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/ec2.py#L191-L212
|
[
"def create_session(profile_name):\n if profile_name is None:\n return boto3\n else:\n try:\n session = boto3.Session(profile_name=profile_name)\n return session\n except botocore.exceptions.ProfileNotFound as e:\n click.echo(\"Invalid profile name: {0}\".format(profile_name, e), err=True)\n sys.exit(2)\n",
"def create_ssh_command(session, instance_id, instance_name, username, key_file, port, ssh_options,\n use_private_ip, gateway_instance_id, gateway_username):\n \"\"\"Create SSH Login command string\"\"\"\n ec2 = session.resource('ec2')\n if instance_id is not None:\n try:\n instance = ec2.Instance(instance_id)\n hostname = _get_instance_ip_address(instance, use_private_ip)\n except botocore.exceptions.ClientError as e:\n click.echo(\"Invalid instance ID {0} ({1})\".format(instance_id, e), err=True)\n sys.exit(2)\n elif instance_name is not None:\n try:\n conditions = [\n {'Name': 'tag:Name', 'Values': [instance_name]},\n {'Name': 'instance-state-name', 'Values': ['running']},\n ]\n instances = ec2.instances.filter(Filters=conditions)\n target_instances = []\n for idx, i in enumerate(instances):\n target_instances.append(i)\n if len(target_instances) == 1:\n instance = target_instances[0]\n hostname = _get_instance_ip_address(instance, use_private_ip)\n else:\n for idx, i in enumerate(instances):\n tag_name = get_tag_value(i.tags, 'Name')\n click.echo('[{0}]: {1}\\t{2}\\t{3}\\t{4}\\t{5}'.format(\n idx, i.id, i.public_ip_address, i.state['Name'], tag_name, i.key_name))\n selected_idx = click.prompt(\"Please enter a valid number\", type=int, default=0)\n if len(target_instances) - 1 < selected_idx or selected_idx < 0:\n click.echo(\"selected number [{0}] is invalid\".format(selected_idx), err=True)\n sys.exit(2)\n click.echo(\"{0} is selected.\".format(selected_idx))\n instance = target_instances[selected_idx]\n hostname = _get_instance_ip_address(instance, use_private_ip)\n except botocore.exceptions.ClientError as e:\n click.echo(\"Invalid instance ID {0} ({1})\".format(instance_id, e), err=True)\n sys.exit(2)\n # TODO: need to refactor and make it testable\n if key_file is None:\n key_file_option = ''\n else:\n key_file_option = ' -i {0}'.format(key_file)\n\n gateway_username_option = build_option_username(gateway_username)\n username_option = 
build_option_username(username)\n\n if ssh_options is None:\n ssh_options = ''\n else:\n ssh_options = ' {0}'.format(ssh_options)\n if gateway_instance_id is not None:\n gateway_instance = ec2.Instance(gateway_instance_id)\n gateway_public_ip = gateway_instance.public_ip_address\n hostname = instance.private_ip_address\n cmd = 'ssh -tt{0} {1}{2} -p {3}{4} ssh{5} {6}'.format(\n gateway_username_option, gateway_public_ip, key_file_option,\n port, ssh_options, username_option, hostname)\n else:\n cmd = 'ssh{0} {1}{2} -p {3}{4}'.format(username_option, hostname, key_file_option, port, ssh_options)\n return cmd\n"
] |
# -*- coding: utf-8 -*-
import subprocess
import sys
import botocore
import click
from jungle.session import create_session
def format_output(instances, flag):
"""return formatted string for instance"""
out = []
line_format = '{0}\t{1}\t{2}\t{3}\t{4}'
name_len = _get_max_name_len(instances) + 3
if flag:
line_format = '{0:<' + str(name_len) + '}{1:<16}{2:<21}{3:<16}{4:<16}'
for i in instances:
tag_name = get_tag_value(i.tags, 'Name')
out.append(line_format.format(
tag_name, i.state['Name'], i.id, i.private_ip_address, str(i.public_ip_address)))
return out
def _get_instance_ip_address(instance, use_private_ip=False):
if use_private_ip:
return instance.private_ip_address
elif instance.public_ip_address is not None:
return instance.public_ip_address
else:
click.echo("Public IP address not set. Attempting to use the private IP address.")
return instance.private_ip_address
def _get_max_name_len(instances):
"""get max length of Tag:Name"""
# FIXME: ec2.instanceCollection doesn't have __len__
for i in instances:
return max([len(get_tag_value(i.tags, 'Name')) for i in instances])
return 0
def get_tag_value(x, key):
"""Get a value from tag"""
if x is None:
return ''
result = [y['Value'] for y in x if y['Key'] == key]
if result:
return result[0]
return ''
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
"""EC2 CLI group"""
ctx.obj = {'AWS_PROFILE_NAME': profile_name}
@cli.command(help='List EC2 instances')
@click.argument('name', default='*')
@click.option('--list-formatted', '-l', is_flag=True)
@click.pass_context
def ls(ctx, name, list_formatted):
"""List EC2 instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
if name == '*':
instances = ec2.instances.filter()
else:
condition = {'Name': 'tag:Name', 'Values': [name]}
instances = ec2.instances.filter(Filters=[condition])
out = format_output(instances, list_formatted)
click.echo('\n'.join(out))
@cli.command(help='Start EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def up(ctx, instance_id):
"""Start EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.start()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
@cli.command(help='Stop EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def down(ctx, instance_id):
"""Stop EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.stop()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
def create_ssh_command(session, instance_id, instance_name, username, key_file, port, ssh_options,
use_private_ip, gateway_instance_id, gateway_username):
"""Create SSH Login command string"""
ec2 = session.resource('ec2')
if instance_id is not None:
try:
instance = ec2.Instance(instance_id)
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
elif instance_name is not None:
try:
conditions = [
{'Name': 'tag:Name', 'Values': [instance_name]},
{'Name': 'instance-state-name', 'Values': ['running']},
]
instances = ec2.instances.filter(Filters=conditions)
target_instances = []
for idx, i in enumerate(instances):
target_instances.append(i)
if len(target_instances) == 1:
instance = target_instances[0]
hostname = _get_instance_ip_address(instance, use_private_ip)
else:
for idx, i in enumerate(instances):
tag_name = get_tag_value(i.tags, 'Name')
click.echo('[{0}]: {1}\t{2}\t{3}\t{4}\t{5}'.format(
idx, i.id, i.public_ip_address, i.state['Name'], tag_name, i.key_name))
selected_idx = click.prompt("Please enter a valid number", type=int, default=0)
if len(target_instances) - 1 < selected_idx or selected_idx < 0:
click.echo("selected number [{0}] is invalid".format(selected_idx), err=True)
sys.exit(2)
click.echo("{0} is selected.".format(selected_idx))
instance = target_instances[selected_idx]
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
# TODO: need to refactor and make it testable
if key_file is None:
key_file_option = ''
else:
key_file_option = ' -i {0}'.format(key_file)
gateway_username_option = build_option_username(gateway_username)
username_option = build_option_username(username)
if ssh_options is None:
ssh_options = ''
else:
ssh_options = ' {0}'.format(ssh_options)
if gateway_instance_id is not None:
gateway_instance = ec2.Instance(gateway_instance_id)
gateway_public_ip = gateway_instance.public_ip_address
hostname = instance.private_ip_address
cmd = 'ssh -tt{0} {1}{2} -p {3}{4} ssh{5} {6}'.format(
gateway_username_option, gateway_public_ip, key_file_option,
port, ssh_options, username_option, hostname)
else:
cmd = 'ssh{0} {1}{2} -p {3}{4}'.format(username_option, hostname, key_file_option, port, ssh_options)
return cmd
def build_option_username(username):
if username is None:
return ''
else:
return ' -l {0}'.format(username)
@cli.command(help='SSH login to EC2 instance')
@click.option('--instance-id', '-i', default=None, help='EC2 instance id')
@click.option('--instance-name', '-n', default=None, help='EC2 instance Name Tag')
@click.option('--username', '-u', default=None, help='Login username')
@click.option('--key-file', '-k', help='SSH Key file path', type=click.Path())
@click.option('--port', '-p', help='SSH port', default=22)
@click.option('--private-ip', '-e', help='Use instance private ip', is_flag=True, default=False)
@click.option('--ssh-options', '-s', help='Additional SSH options', default=None)
@click.option('--gateway-instance-id', '-g', default=None, help='Gateway instance id')
@click.option('--gateway-username', '-x', default=None, help='Gateway username')
@click.option('--dry-run', is_flag=True, default=False, help='Print SSH Login command and exist')
@click.pass_context
|
achiku/jungle
|
jungle/cli.py
|
JungleCLI.get_command
|
python
|
def get_command(self, ctx, name):
    """Resolve *name* to the ``cli`` object of the ``jungle.<name>`` module.

    Returns None when no such module exists, which tells click the
    command is unknown.
    """
    try:
        module = __import__('jungle.' + name, None, None, ['cli'])
    except ImportError:
        return None
    return module.cli
|
get command
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/cli.py#L15-L21
| null |
class JungleCLI(click.MultiCommand):
    """Jungle CLI main class"""

    def list_commands(self, ctx):
        """return available modules"""
        # Static registry of subcommand groups; get_command() imports the
        # matching ``jungle.<name>`` module lazily when one is invoked.
        return ['ec2', 'elb', 'emr', 'asg', 'rds']
|
achiku/jungle
|
jungle/elb.py
|
ls
|
python
|
def ls(ctx, name, list_instances):
    """List ELB instances.

    Args:
        ctx: click context; ``ctx.obj`` carries the AWS profile name.
        name: load balancer name, or '*' to list every load balancer.
        list_instances: when True, also print each attached EC2 instance
            id and its health state.
    """
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    client = session.client('elb')
    inst = {'LoadBalancerDescriptions': []}
    if name == '*':
        inst = client.describe_load_balancers()
    else:
        try:
            inst = client.describe_load_balancers(LoadBalancerNames=[name])
        except ClientError as e:
            # Best effort: report the lookup failure and fall through with
            # the empty default so nothing is listed.
            click.echo(e, err=True)
    for i in inst['LoadBalancerDescriptions']:
        click.echo(i['LoadBalancerName'])
        if list_instances:
            for ec2 in i['Instances']:
                health = client.describe_instance_health(
                    # Bug fix: use this balancer's real name. The previous
                    # code passed ``name``, which is the invalid '*'
                    # wildcard when listing all load balancers.
                    LoadBalancerName=i['LoadBalancerName'],
                    Instances=[ec2]
                )
                click.echo('{0}\t{1}'.format(ec2['InstanceId'], health['InstanceStates'][0]['State']))
|
List ELB instances
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/elb.py#L19-L41
|
[
"def create_session(profile_name):\n if profile_name is None:\n return boto3\n else:\n try:\n session = boto3.Session(profile_name=profile_name)\n return session\n except botocore.exceptions.ProfileNotFound as e:\n click.echo(\"Invalid profile name: {0}\".format(profile_name, e), err=True)\n sys.exit(2)\n"
] |
# -*- coding: utf-8 -*-
import click
from botocore.exceptions import ClientError
from jungle.session import create_session
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
    """ELB CLI group"""
    # Stash the profile name on the click context so subcommands can build
    # a boto3 session from it via create_session().
    ctx.obj = {'AWS_PROFILE_NAME': profile_name}
@cli.command(help='List ELB instances')
@click.argument('name', default='*')
@click.option('--list-instances', '-l', 'list_instances', is_flag=True, help='List attached EC2 instances')
@click.pass_context
|
achiku/jungle
|
jungle/asg.py
|
format_output
|
python
|
def format_output(groups, flag):
    """Return formatted string for each AutoScaling group in *groups*.

    NOTE(review): *flag* is currently unused; the caller passes the
    --list-formatted option through unchanged.
    """
    template = '{0}\t{1}\tdesired:{2}\tmax:{3}\tmin:{4}\t{5}'
    return [
        template.format(
            group['AutoScalingGroupName'],
            group['LaunchConfigurationName'],
            group['DesiredCapacity'],
            group['MaxSize'],
            group['MinSize'],
            group['CreatedTime'].strftime('%Y/%m/%d %H:%M:%S'),
        )
        for group in groups['AutoScalingGroups']
    ]
|
return formatted string for instance
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/asg.py#L6-L19
| null |
# -*- coding: utf-8 -*-
import click
from jungle.session import create_session
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
    """AutoScaling CLI group"""
    # Expose the profile name to subcommands through the click context.
    ctx.obj = {}
    ctx.obj['AWS_PROFILE_NAME'] = profile_name
@cli.command(help='List AutoScaling groups')
@click.argument('name', default='*')
@click.option('--list-formatted', '-l', is_flag=True)
@click.pass_context
def ls(ctx, name, list_formatted):
    """List AutoScaling groups"""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    client = session.client('autoscaling')
    # '*' means "no name filter": fetch every group in the account/region.
    if name == "*":
        groups = client.describe_auto_scaling_groups()
    else:
        groups = client.describe_auto_scaling_groups(
            AutoScalingGroupNames=[
                name,
            ]
        )
    out = format_output(groups, list_formatted)
    click.echo('\n'.join(out))
|
achiku/jungle
|
jungle/asg.py
|
ls
|
python
|
def ls(ctx, name, list_formatted):
    """List AutoScaling groups

    Args:
        ctx: click context; ``ctx.obj`` carries the AWS profile name.
        name: AutoScaling group name, or '*' to list every group.
        list_formatted: forwarded to format_output unchanged.
    """
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    client = session.client('autoscaling')
    # '*' means "no name filter": fetch every group in the account/region.
    if name == "*":
        groups = client.describe_auto_scaling_groups()
    else:
        groups = client.describe_auto_scaling_groups(
            AutoScalingGroupNames=[
                name,
            ]
        )
    out = format_output(groups, list_formatted)
    click.echo('\n'.join(out))
|
List AutoScaling groups
|
train
|
https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/asg.py#L35-L49
|
[
"def create_session(profile_name):\n if profile_name is None:\n return boto3\n else:\n try:\n session = boto3.Session(profile_name=profile_name)\n return session\n except botocore.exceptions.ProfileNotFound as e:\n click.echo(\"Invalid profile name: {0}\".format(profile_name, e), err=True)\n sys.exit(2)\n",
"def format_output(groups, flag):\n \"\"\"return formatted string for instance\"\"\"\n out = []\n line_format = '{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}'\n for g in groups['AutoScalingGroups']:\n out.append(line_format.format(\n g['AutoScalingGroupName'],\n g['LaunchConfigurationName'],\n 'desired:'+str(g['DesiredCapacity']),\n 'max:'+str(g['MaxSize']),\n 'min:'+str(g['MinSize']),\n g['CreatedTime'].strftime('%Y/%m/%d %H:%M:%S'),\n ))\n return out\n"
] |
# -*- coding: utf-8 -*-
import click
from jungle.session import create_session
def format_output(groups, flag):
    """Return formatted string for each AutoScaling group in *groups*.

    NOTE(review): *flag* is currently unused; the caller passes the
    --list-formatted option through unchanged.
    """
    template = '{0}\t{1}\tdesired:{2}\tmax:{3}\tmin:{4}\t{5}'
    rows = []
    for group in groups['AutoScalingGroups']:
        rows.append(template.format(
            group['AutoScalingGroupName'],
            group['LaunchConfigurationName'],
            group['DesiredCapacity'],
            group['MaxSize'],
            group['MinSize'],
            group['CreatedTime'].strftime('%Y/%m/%d %H:%M:%S'),
        ))
    return rows
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
    """AutoScaling CLI group"""
    # Expose the profile name to subcommands through the click context.
    ctx.obj = {}
    ctx.obj['AWS_PROFILE_NAME'] = profile_name
@cli.command(help='List AutoScaling groups')
@click.argument('name', default='*')
@click.option('--list-formatted', '-l', is_flag=True)
@click.pass_context
|
Celeo/Preston
|
preston/preston.py
|
Preston._get_access_from_refresh
|
python
|
def _get_access_from_refresh(self) -> Tuple[str, float]:
    """Uses the stored refresh token to get a new access token.

    This method assumes that the refresh token exists.

    Returns:
        (access_token, expires_in) -- the new token and its lifetime in
        seconds from now, as reported by the OAuth token endpoint.
    """
    headers = self._get_authorization_headers()
    data = {
        'grant_type': 'refresh_token',
        'refresh_token': self.refresh_token
    }
    # NOTE(review): no status-code check here; a failed refresh surfaces
    # as a KeyError on 'access_token' below -- confirm that is intended.
    r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
    response_data = r.json()
    return (response_data['access_token'], response_data['expires_in'])
|
Uses the stored refresh token to get a new access token.
This method assumes that the refresh token exists.
Args:
None
Returns:
new access token and expiration time (from now)
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L91-L109
|
[
"def _get_authorization_headers(self) -> dict:\n \"\"\"Constructs and returns the Authorization header for the client app.\n\n Args:\n None\n\n Returns:\n header dict for communicating with the authorization endpoints\n \"\"\"\n auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')\n auth = auth.replace('\\n', '').replace(' ', '')\n auth = 'Basic {}'.format(auth)\n headers = {'Authorization': auth}\n return headers\n"
] |
class Preston:
"""Preston class.
This class is used to interface with the EVE Online "ESI" API.
The __init__ method only **kwargs instead of a specific
listing of arguments; here's the list of useful key-values:
version version of the spec to load
user_agent user-agent to use
client_id app's client id
client_secret app's client secret
callback_url app's callback url
scope app's scope(s)
access_token if supplied along with access_expiration,
Preston will make authenticated calls to ESI
access_expiration see above
refresh_token if supplied, Preston will use it to get new
access tokens; can be supplied with or without
access_token and access_expiration
Args:
kwargs: various configuration options
"""
BASE_URL = 'https://esi.tech.ccp.is'
SPEC_URL = BASE_URL + '/_{}/swagger.json'
OAUTH_URL = 'https://login.eveonline.com/oauth/'
TOKEN_URL = OAUTH_URL + 'token'
AUTHORIZE_URL = OAUTH_URL + 'authorize'
WHOAMI_URL = OAUTH_URL + 'verify'
METHODS = ['get', 'post', 'put', 'delete']
OPERATION_ID_KEY = 'operationId'
VAR_REPLACE_REGEX = r'{(\w+)}'
def __init__(self, **kwargs: str) -> None:
self.cache = Cache()
self.spec = None
self.version = kwargs.get('version', 'latest')
self.session = requests.Session()
self.session.headers.update({
'User-Agent': kwargs.get('user_agent', ''),
'Accept': 'application/json'
})
self.client_id = kwargs.get('client_id')
self.client_secret = kwargs.get('client_secret')
self.callback_url = kwargs.get('callback_url')
self.scope = kwargs.get('scope', '')
self.access_token = kwargs.get('access_token')
self.access_expiration = kwargs.get('access_expiration')
self.refresh_token = kwargs.get('refresh_token')
self._kwargs = kwargs
if not kwargs.get('no_update_token', False):
self._try_refresh_access_token()
self._update_access_token_header()
def copy(self) -> 'Preston':
"""Creates a copy of this Preston object.
The returned instance is not connected to this, so you can set
whichever headers or other data you want without impacting this instance.
The configuration of the returned instance will match the (original)
configuration of this instance - the kwargs are reused.
Args:
None
Returns:
new Preston instance
"""
return Preston(**self._kwargs)
def _get_authorization_headers(self) -> dict:
"""Constructs and returns the Authorization header for the client app.
Args:
None
Returns:
header dict for communicating with the authorization endpoints
"""
auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')
auth = auth.replace('\n', '').replace(' ', '')
auth = 'Basic {}'.format(auth)
headers = {'Authorization': auth}
return headers
def _try_refresh_access_token(self) -> None:
"""Attempts to get a new access token using the refresh token, if needed.
If the access token is expired and this instance has a stored refresh token,
then the refresh token is in the API call to get a new access token. If
successful, this instance is modified in-place with that new access token.
Args:
None
Returns:
None
"""
if self.refresh_token:
if not self.access_token or self._is_access_token_expired():
self.access_token, self.access_expiration = self._get_access_from_refresh()
self.access_expiration = time.time() + self.access_expiration
def _is_access_token_expired(self) -> bool:
"""Returns true if the stored access token has expired.
Args:
None
Returns:
True if the access token is expired
"""
return time.time() > self.access_expiration
def get_authorize_url(self) -> str:
"""Constructs and returns the authorization URL.
This is the URL that a user will have to navigate to in their browser
and complete the login and authorization flow. Upon completion, they
will be redirected to your app's callback URL.
Args:
None
Returns:
URL
"""
return (
f'{self.AUTHORIZE_URL}?response_type=code&redirect_uri={self.callback_url}'
f'&client_id={self.client_id}&scope={self.scope}'
)
def authenticate(self, code: str) -> 'Preston':
"""Authenticates using the code from the EVE SSO.
A new Preston object is returned; this object is not modified.
The intended usage is:
auth = preston.authenticate('some_code_here')
Args:
code: SSO code
Returns:
new Preston, authenticated
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'authorization_code',
'code': code
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
if not r.status_code == 200:
raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')
new_kwargs = dict(self._kwargs)
response_data = r.json()
new_kwargs['access_token'] = response_data['access_token']
new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
new_kwargs['refresh_token'] = response_data['refresh_token']
return Preston(**new_kwargs)
def _update_access_token_header(self) -> None:
"""Updates the requests session with the access token header.
This method does nothing if this instance does not have a
stored access token.
Args:
None
Returns:
None
"""
if self.access_token:
self.session.headers.update({
'Authorization': f'Bearer {self.access_token}'
})
def _get_spec(self) -> dict:
"""Fetches the OpenAPI spec from the server.
If the spec has already been fetched, the cached version is returned instead.
ArgS:
None
Returns:
OpenAPI spec data
"""
if self.spec:
return self.spec
self.spec = requests.get(self.SPEC_URL.format(self.version)).json()
return self.spec
def _get_path_for_op_id(self, id: str) -> Optional[str]:
"""Searches the spec for a path matching the operation id.
Args:
id: operation id
Returns:
path to the endpoint, or None if not found
"""
for path_key, path_value in self._get_spec()['paths'].items():
for method in self.METHODS:
if method in path_value:
if self.OPERATION_ID_KEY in path_value[method]:
if path_value[method][self.OPERATION_ID_KEY] == id:
return path_key
return None
def _insert_vars(self, path: str, data: dict) -> str:
"""Inserts variables into the ESI URL path.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
path with variables filled
"""
data = data.copy()
while True:
match = re.search(self.VAR_REPLACE_REGEX, path)
if not match:
return path
replace_from = match.group(0)
replace_with = str(data.get(match.group(1)))
path = path.replace(replace_from, replace_with)
def whoami(self) -> dict:
"""Returns the basic information about the authenticated character.
Obviously doesn't do anything if this Preston instance is not
authenticated, so it returns an empty dict.
Args:
None
Returns:
character info if authenticated, otherwise an empty dict
"""
if not self.access_token:
return {}
self._try_refresh_access_token()
return self.session.get(self.WHOAMI_URL).json()
def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:
"""Queries the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
ESI data
"""
path = self._insert_vars(path, data)
path = self.BASE_URL + path
data = self.cache.check(path)
if data:
return data
self._try_refresh_access_token()
r = self.session.get(path)
self.cache.set(r)
return r.json()
def get_op(self, id: str, **kwargs: str) -> dict:
"""Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.get_path(path, kwargs)
def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._insert_vars(path, path_data or {})
path = self.BASE_URL + path
self._try_refresh_access_token()
return self.session.post(path, json=post_data).json()
def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by looking up an operation id.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.post_path(path, path_data, post_data)
|
Celeo/Preston
|
preston/preston.py
|
Preston._get_authorization_headers
|
python
|
def _get_authorization_headers(self) -> dict:
auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')
auth = auth.replace('\n', '').replace(' ', '')
auth = 'Basic {}'.format(auth)
headers = {'Authorization': auth}
return headers
|
Constructs and returns the Authorization header for the client app.
Args:
None
Returns:
header dict for communicating with the authorization endpoints
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L111-L124
| null |
class Preston:
"""Preston class.
This class is used to interface with the EVE Online "ESI" API.
The __init__ method only **kwargs instead of a specific
listing of arguments; here's the list of useful key-values:
version version of the spec to load
user_agent user-agent to use
client_id app's client id
client_secret app's client secret
callback_url app's callback url
scope app's scope(s)
access_token if supplied along with access_expiration,
Preston will make authenticated calls to ESI
access_expiration see above
refresh_token if supplied, Preston will use it to get new
access tokens; can be supplied with or without
access_token and access_expiration
Args:
kwargs: various configuration options
"""
BASE_URL = 'https://esi.tech.ccp.is'
SPEC_URL = BASE_URL + '/_{}/swagger.json'
OAUTH_URL = 'https://login.eveonline.com/oauth/'
TOKEN_URL = OAUTH_URL + 'token'
AUTHORIZE_URL = OAUTH_URL + 'authorize'
WHOAMI_URL = OAUTH_URL + 'verify'
METHODS = ['get', 'post', 'put', 'delete']
OPERATION_ID_KEY = 'operationId'
VAR_REPLACE_REGEX = r'{(\w+)}'
def __init__(self, **kwargs: str) -> None:
self.cache = Cache()
self.spec = None
self.version = kwargs.get('version', 'latest')
self.session = requests.Session()
self.session.headers.update({
'User-Agent': kwargs.get('user_agent', ''),
'Accept': 'application/json'
})
self.client_id = kwargs.get('client_id')
self.client_secret = kwargs.get('client_secret')
self.callback_url = kwargs.get('callback_url')
self.scope = kwargs.get('scope', '')
self.access_token = kwargs.get('access_token')
self.access_expiration = kwargs.get('access_expiration')
self.refresh_token = kwargs.get('refresh_token')
self._kwargs = kwargs
if not kwargs.get('no_update_token', False):
self._try_refresh_access_token()
self._update_access_token_header()
def copy(self) -> 'Preston':
"""Creates a copy of this Preston object.
The returned instance is not connected to this, so you can set
whichever headers or other data you want without impacting this instance.
The configuration of the returned instance will match the (original)
configuration of this instance - the kwargs are reused.
Args:
None
Returns:
new Preston instance
"""
return Preston(**self._kwargs)
def _get_access_from_refresh(self) -> Tuple[str, float]:
"""Uses the stored refresh token to get a new access token.
This method assumes that the refresh token exists.
Args:
None
Returns:
new access token and expiration time (from now)
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
response_data = r.json()
return (response_data['access_token'], response_data['expires_in'])
def _try_refresh_access_token(self) -> None:
"""Attempts to get a new access token using the refresh token, if needed.
If the access token is expired and this instance has a stored refresh token,
then the refresh token is in the API call to get a new access token. If
successful, this instance is modified in-place with that new access token.
Args:
None
Returns:
None
"""
if self.refresh_token:
if not self.access_token or self._is_access_token_expired():
self.access_token, self.access_expiration = self._get_access_from_refresh()
self.access_expiration = time.time() + self.access_expiration
def _is_access_token_expired(self) -> bool:
"""Returns true if the stored access token has expired.
Args:
None
Returns:
True if the access token is expired
"""
return time.time() > self.access_expiration
def get_authorize_url(self) -> str:
"""Constructs and returns the authorization URL.
This is the URL that a user will have to navigate to in their browser
and complete the login and authorization flow. Upon completion, they
will be redirected to your app's callback URL.
Args:
None
Returns:
URL
"""
return (
f'{self.AUTHORIZE_URL}?response_type=code&redirect_uri={self.callback_url}'
f'&client_id={self.client_id}&scope={self.scope}'
)
def authenticate(self, code: str) -> 'Preston':
"""Authenticates using the code from the EVE SSO.
A new Preston object is returned; this object is not modified.
The intended usage is:
auth = preston.authenticate('some_code_here')
Args:
code: SSO code
Returns:
new Preston, authenticated
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'authorization_code',
'code': code
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
if not r.status_code == 200:
raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')
new_kwargs = dict(self._kwargs)
response_data = r.json()
new_kwargs['access_token'] = response_data['access_token']
new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
new_kwargs['refresh_token'] = response_data['refresh_token']
return Preston(**new_kwargs)
def _update_access_token_header(self) -> None:
"""Updates the requests session with the access token header.
This method does nothing if this instance does not have a
stored access token.
Args:
None
Returns:
None
"""
if self.access_token:
self.session.headers.update({
'Authorization': f'Bearer {self.access_token}'
})
def _get_spec(self) -> dict:
"""Fetches the OpenAPI spec from the server.
If the spec has already been fetched, the cached version is returned instead.
ArgS:
None
Returns:
OpenAPI spec data
"""
if self.spec:
return self.spec
self.spec = requests.get(self.SPEC_URL.format(self.version)).json()
return self.spec
def _get_path_for_op_id(self, id: str) -> Optional[str]:
"""Searches the spec for a path matching the operation id.
Args:
id: operation id
Returns:
path to the endpoint, or None if not found
"""
for path_key, path_value in self._get_spec()['paths'].items():
for method in self.METHODS:
if method in path_value:
if self.OPERATION_ID_KEY in path_value[method]:
if path_value[method][self.OPERATION_ID_KEY] == id:
return path_key
return None
def _insert_vars(self, path: str, data: dict) -> str:
"""Inserts variables into the ESI URL path.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
path with variables filled
"""
data = data.copy()
while True:
match = re.search(self.VAR_REPLACE_REGEX, path)
if not match:
return path
replace_from = match.group(0)
replace_with = str(data.get(match.group(1)))
path = path.replace(replace_from, replace_with)
def whoami(self) -> dict:
"""Returns the basic information about the authenticated character.
Obviously doesn't do anything if this Preston instance is not
authenticated, so it returns an empty dict.
Args:
None
Returns:
character info if authenticated, otherwise an empty dict
"""
if not self.access_token:
return {}
self._try_refresh_access_token()
return self.session.get(self.WHOAMI_URL).json()
def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:
"""Queries the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
ESI data
"""
path = self._insert_vars(path, data)
path = self.BASE_URL + path
data = self.cache.check(path)
if data:
return data
self._try_refresh_access_token()
r = self.session.get(path)
self.cache.set(r)
return r.json()
def get_op(self, id: str, **kwargs: str) -> dict:
"""Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.get_path(path, kwargs)
def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._insert_vars(path, path_data or {})
path = self.BASE_URL + path
self._try_refresh_access_token()
return self.session.post(path, json=post_data).json()
def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by looking up an operation id.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.post_path(path, path_data, post_data)
|
Celeo/Preston
|
preston/preston.py
|
Preston._try_refresh_access_token
|
python
|
def _try_refresh_access_token(self) -> None:
    """Attempts to get a new access token using the refresh token, if needed.

    If the access token is missing or expired and this instance has a
    stored refresh token, a new access token is fetched and this instance
    is modified in-place.
    """
    if self.refresh_token:
        if not self.access_token or self._is_access_token_expired():
            self.access_token, self.access_expiration = self._get_access_from_refresh()
            # _get_access_from_refresh returns a relative lifetime
            # ("expires_in" seconds); convert it to an absolute epoch time.
            self.access_expiration = time.time() + self.access_expiration
|
Attempts to get a new access token using the refresh token, if needed.
If the access token is expired and this instance has a stored refresh token,
then the refresh token is in the API call to get a new access token. If
successful, this instance is modified in-place with that new access token.
Args:
None
Returns:
None
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L126-L142
|
[
"def _get_access_from_refresh(self) -> Tuple[str, float]:\n \"\"\"Uses the stored refresh token to get a new access token.\n\n This method assumes that the refresh token exists.\n\n Args:\n None\n\n Returns:\n new access token and expiration time (from now)\n \"\"\"\n headers = self._get_authorization_headers()\n data = {\n 'grant_type': 'refresh_token',\n 'refresh_token': self.refresh_token\n }\n r = self.session.post(self.TOKEN_URL, headers=headers, data=data)\n response_data = r.json()\n return (response_data['access_token'], response_data['expires_in'])\n",
"def _is_access_token_expired(self) -> bool:\n \"\"\"Returns true if the stored access token has expired.\n\n Args:\n None\n\n Returns:\n True if the access token is expired\n \"\"\"\n return time.time() > self.access_expiration\n"
] |
class Preston:
"""Preston class.
This class is used to interface with the EVE Online "ESI" API.
The __init__ method only **kwargs instead of a specific
listing of arguments; here's the list of useful key-values:
version version of the spec to load
user_agent user-agent to use
client_id app's client id
client_secret app's client secret
callback_url app's callback url
scope app's scope(s)
access_token if supplied along with access_expiration,
Preston will make authenticated calls to ESI
access_expiration see above
refresh_token if supplied, Preston will use it to get new
access tokens; can be supplied with or without
access_token and access_expiration
Args:
kwargs: various configuration options
"""
BASE_URL = 'https://esi.tech.ccp.is'
SPEC_URL = BASE_URL + '/_{}/swagger.json'
OAUTH_URL = 'https://login.eveonline.com/oauth/'
TOKEN_URL = OAUTH_URL + 'token'
AUTHORIZE_URL = OAUTH_URL + 'authorize'
WHOAMI_URL = OAUTH_URL + 'verify'
METHODS = ['get', 'post', 'put', 'delete']
OPERATION_ID_KEY = 'operationId'
VAR_REPLACE_REGEX = r'{(\w+)}'
def __init__(self, **kwargs: str) -> None:
self.cache = Cache()
self.spec = None
self.version = kwargs.get('version', 'latest')
self.session = requests.Session()
self.session.headers.update({
'User-Agent': kwargs.get('user_agent', ''),
'Accept': 'application/json'
})
self.client_id = kwargs.get('client_id')
self.client_secret = kwargs.get('client_secret')
self.callback_url = kwargs.get('callback_url')
self.scope = kwargs.get('scope', '')
self.access_token = kwargs.get('access_token')
self.access_expiration = kwargs.get('access_expiration')
self.refresh_token = kwargs.get('refresh_token')
self._kwargs = kwargs
if not kwargs.get('no_update_token', False):
self._try_refresh_access_token()
self._update_access_token_header()
def copy(self) -> 'Preston':
"""Creates a copy of this Preston object.
The returned instance is not connected to this, so you can set
whichever headers or other data you want without impacting this instance.
The configuration of the returned instance will match the (original)
configuration of this instance - the kwargs are reused.
Args:
None
Returns:
new Preston instance
"""
return Preston(**self._kwargs)
def _get_access_from_refresh(self) -> Tuple[str, float]:
"""Uses the stored refresh token to get a new access token.
This method assumes that the refresh token exists.
Args:
None
Returns:
new access token and expiration time (from now)
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
response_data = r.json()
return (response_data['access_token'], response_data['expires_in'])
def _get_authorization_headers(self) -> dict:
"""Constructs and returns the Authorization header for the client app.
Args:
None
Returns:
header dict for communicating with the authorization endpoints
"""
auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')
auth = auth.replace('\n', '').replace(' ', '')
auth = 'Basic {}'.format(auth)
headers = {'Authorization': auth}
return headers
def _is_access_token_expired(self) -> bool:
"""Returns true if the stored access token has expired.
Args:
None
Returns:
True if the access token is expired
"""
return time.time() > self.access_expiration
def get_authorize_url(self) -> str:
"""Constructs and returns the authorization URL.
This is the URL that a user will have to navigate to in their browser
and complete the login and authorization flow. Upon completion, they
will be redirected to your app's callback URL.
Args:
None
Returns:
URL
"""
return (
f'{self.AUTHORIZE_URL}?response_type=code&redirect_uri={self.callback_url}'
f'&client_id={self.client_id}&scope={self.scope}'
)
def authenticate(self, code: str) -> 'Preston':
"""Authenticates using the code from the EVE SSO.
A new Preston object is returned; this object is not modified.
The intended usage is:
auth = preston.authenticate('some_code_here')
Args:
code: SSO code
Returns:
new Preston, authenticated
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'authorization_code',
'code': code
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
if not r.status_code == 200:
raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')
new_kwargs = dict(self._kwargs)
response_data = r.json()
new_kwargs['access_token'] = response_data['access_token']
new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
new_kwargs['refresh_token'] = response_data['refresh_token']
return Preston(**new_kwargs)
def _update_access_token_header(self) -> None:
"""Updates the requests session with the access token header.
This method does nothing if this instance does not have a
stored access token.
Args:
None
Returns:
None
"""
if self.access_token:
self.session.headers.update({
'Authorization': f'Bearer {self.access_token}'
})
def _get_spec(self) -> dict:
"""Fetches the OpenAPI spec from the server.
If the spec has already been fetched, the cached version is returned instead.
ArgS:
None
Returns:
OpenAPI spec data
"""
if self.spec:
return self.spec
self.spec = requests.get(self.SPEC_URL.format(self.version)).json()
return self.spec
def _get_path_for_op_id(self, id: str) -> Optional[str]:
"""Searches the spec for a path matching the operation id.
Args:
id: operation id
Returns:
path to the endpoint, or None if not found
"""
for path_key, path_value in self._get_spec()['paths'].items():
for method in self.METHODS:
if method in path_value:
if self.OPERATION_ID_KEY in path_value[method]:
if path_value[method][self.OPERATION_ID_KEY] == id:
return path_key
return None
def _insert_vars(self, path: str, data: dict) -> str:
"""Inserts variables into the ESI URL path.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
path with variables filled
"""
data = data.copy()
while True:
match = re.search(self.VAR_REPLACE_REGEX, path)
if not match:
return path
replace_from = match.group(0)
replace_with = str(data.get(match.group(1)))
path = path.replace(replace_from, replace_with)
def whoami(self) -> dict:
"""Returns the basic information about the authenticated character.
Obviously doesn't do anything if this Preston instance is not
authenticated, so it returns an empty dict.
Args:
None
Returns:
character info if authenticated, otherwise an empty dict
"""
if not self.access_token:
return {}
self._try_refresh_access_token()
return self.session.get(self.WHOAMI_URL).json()
def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:
"""Queries the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
ESI data
"""
path = self._insert_vars(path, data)
path = self.BASE_URL + path
data = self.cache.check(path)
if data:
return data
self._try_refresh_access_token()
r = self.session.get(path)
self.cache.set(r)
return r.json()
def get_op(self, id: str, **kwargs: str) -> dict:
"""Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.get_path(path, kwargs)
def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._insert_vars(path, path_data or {})
path = self.BASE_URL + path
self._try_refresh_access_token()
return self.session.post(path, json=post_data).json()
def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by looking up an operation id.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.post_path(path, path_data, post_data)
|
Celeo/Preston
|
preston/preston.py
|
Preston.authenticate
|
python
|
def authenticate(self, code: str) -> 'Preston':
headers = self._get_authorization_headers()
data = {
'grant_type': 'authorization_code',
'code': code
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
if not r.status_code == 200:
raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')
new_kwargs = dict(self._kwargs)
response_data = r.json()
new_kwargs['access_token'] = response_data['access_token']
new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
new_kwargs['refresh_token'] = response_data['refresh_token']
return Preston(**new_kwargs)
|
Authenticates using the code from the EVE SSO.
A new Preston object is returned; this object is not modified.
The intended usage is:
auth = preston.authenticate('some_code_here')
Args:
code: SSO code
Returns:
new Preston, authenticated
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L173-L201
|
[
"def _get_authorization_headers(self) -> dict:\n \"\"\"Constructs and returns the Authorization header for the client app.\n\n Args:\n None\n\n Returns:\n header dict for communicating with the authorization endpoints\n \"\"\"\n auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')\n auth = auth.replace('\\n', '').replace(' ', '')\n auth = 'Basic {}'.format(auth)\n headers = {'Authorization': auth}\n return headers\n"
] |
class Preston:
"""Preston class.
This class is used to interface with the EVE Online "ESI" API.
The __init__ method only **kwargs instead of a specific
listing of arguments; here's the list of useful key-values:
version version of the spec to load
user_agent user-agent to use
client_id app's client id
client_secret app's client secret
callback_url app's callback url
scope app's scope(s)
access_token if supplied along with access_expiration,
Preston will make authenticated calls to ESI
access_expiration see above
refresh_token if supplied, Preston will use it to get new
access tokens; can be supplied with or without
access_token and access_expiration
Args:
kwargs: various configuration options
"""
BASE_URL = 'https://esi.tech.ccp.is'
SPEC_URL = BASE_URL + '/_{}/swagger.json'
OAUTH_URL = 'https://login.eveonline.com/oauth/'
TOKEN_URL = OAUTH_URL + 'token'
AUTHORIZE_URL = OAUTH_URL + 'authorize'
WHOAMI_URL = OAUTH_URL + 'verify'
METHODS = ['get', 'post', 'put', 'delete']
OPERATION_ID_KEY = 'operationId'
VAR_REPLACE_REGEX = r'{(\w+)}'
def __init__(self, **kwargs: str) -> None:
self.cache = Cache()
self.spec = None
self.version = kwargs.get('version', 'latest')
self.session = requests.Session()
self.session.headers.update({
'User-Agent': kwargs.get('user_agent', ''),
'Accept': 'application/json'
})
self.client_id = kwargs.get('client_id')
self.client_secret = kwargs.get('client_secret')
self.callback_url = kwargs.get('callback_url')
self.scope = kwargs.get('scope', '')
self.access_token = kwargs.get('access_token')
self.access_expiration = kwargs.get('access_expiration')
self.refresh_token = kwargs.get('refresh_token')
self._kwargs = kwargs
if not kwargs.get('no_update_token', False):
self._try_refresh_access_token()
self._update_access_token_header()
def copy(self) -> 'Preston':
"""Creates a copy of this Preston object.
The returned instance is not connected to this, so you can set
whichever headers or other data you want without impacting this instance.
The configuration of the returned instance will match the (original)
configuration of this instance - the kwargs are reused.
Args:
None
Returns:
new Preston instance
"""
return Preston(**self._kwargs)
def _get_access_from_refresh(self) -> Tuple[str, float]:
"""Uses the stored refresh token to get a new access token.
This method assumes that the refresh token exists.
Args:
None
Returns:
new access token and expiration time (from now)
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
response_data = r.json()
return (response_data['access_token'], response_data['expires_in'])
def _get_authorization_headers(self) -> dict:
"""Constructs and returns the Authorization header for the client app.
Args:
None
Returns:
header dict for communicating with the authorization endpoints
"""
auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')
auth = auth.replace('\n', '').replace(' ', '')
auth = 'Basic {}'.format(auth)
headers = {'Authorization': auth}
return headers
def _try_refresh_access_token(self) -> None:
"""Attempts to get a new access token using the refresh token, if needed.
If the access token is expired and this instance has a stored refresh token,
then the refresh token is in the API call to get a new access token. If
successful, this instance is modified in-place with that new access token.
Args:
None
Returns:
None
"""
if self.refresh_token:
if not self.access_token or self._is_access_token_expired():
self.access_token, self.access_expiration = self._get_access_from_refresh()
self.access_expiration = time.time() + self.access_expiration
def _is_access_token_expired(self) -> bool:
"""Returns true if the stored access token has expired.
Args:
None
Returns:
True if the access token is expired
"""
return time.time() > self.access_expiration
def get_authorize_url(self) -> str:
"""Constructs and returns the authorization URL.
This is the URL that a user will have to navigate to in their browser
and complete the login and authorization flow. Upon completion, they
will be redirected to your app's callback URL.
Args:
None
Returns:
URL
"""
return (
f'{self.AUTHORIZE_URL}?response_type=code&redirect_uri={self.callback_url}'
f'&client_id={self.client_id}&scope={self.scope}'
)
def _update_access_token_header(self) -> None:
"""Updates the requests session with the access token header.
This method does nothing if this instance does not have a
stored access token.
Args:
None
Returns:
None
"""
if self.access_token:
self.session.headers.update({
'Authorization': f'Bearer {self.access_token}'
})
def _get_spec(self) -> dict:
"""Fetches the OpenAPI spec from the server.
If the spec has already been fetched, the cached version is returned instead.
ArgS:
None
Returns:
OpenAPI spec data
"""
if self.spec:
return self.spec
self.spec = requests.get(self.SPEC_URL.format(self.version)).json()
return self.spec
def _get_path_for_op_id(self, id: str) -> Optional[str]:
"""Searches the spec for a path matching the operation id.
Args:
id: operation id
Returns:
path to the endpoint, or None if not found
"""
for path_key, path_value in self._get_spec()['paths'].items():
for method in self.METHODS:
if method in path_value:
if self.OPERATION_ID_KEY in path_value[method]:
if path_value[method][self.OPERATION_ID_KEY] == id:
return path_key
return None
def _insert_vars(self, path: str, data: dict) -> str:
"""Inserts variables into the ESI URL path.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
path with variables filled
"""
data = data.copy()
while True:
match = re.search(self.VAR_REPLACE_REGEX, path)
if not match:
return path
replace_from = match.group(0)
replace_with = str(data.get(match.group(1)))
path = path.replace(replace_from, replace_with)
def whoami(self) -> dict:
"""Returns the basic information about the authenticated character.
Obviously doesn't do anything if this Preston instance is not
authenticated, so it returns an empty dict.
Args:
None
Returns:
character info if authenticated, otherwise an empty dict
"""
if not self.access_token:
return {}
self._try_refresh_access_token()
return self.session.get(self.WHOAMI_URL).json()
def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:
"""Queries the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
ESI data
"""
path = self._insert_vars(path, data)
path = self.BASE_URL + path
data = self.cache.check(path)
if data:
return data
self._try_refresh_access_token()
r = self.session.get(path)
self.cache.set(r)
return r.json()
def get_op(self, id: str, **kwargs: str) -> dict:
"""Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.get_path(path, kwargs)
def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._insert_vars(path, path_data or {})
path = self.BASE_URL + path
self._try_refresh_access_token()
return self.session.post(path, json=post_data).json()
def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by looking up an operation id.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.post_path(path, path_data, post_data)
|
Celeo/Preston
|
preston/preston.py
|
Preston._get_spec
|
python
|
def _get_spec(self) -> dict:
if self.spec:
return self.spec
self.spec = requests.get(self.SPEC_URL.format(self.version)).json()
return self.spec
|
Fetches the OpenAPI spec from the server.
If the spec has already been fetched, the cached version is returned instead.
ArgS:
None
Returns:
OpenAPI spec data
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L220-L234
| null |
class Preston:
"""Preston class.
This class is used to interface with the EVE Online "ESI" API.
The __init__ method only **kwargs instead of a specific
listing of arguments; here's the list of useful key-values:
version version of the spec to load
user_agent user-agent to use
client_id app's client id
client_secret app's client secret
callback_url app's callback url
scope app's scope(s)
access_token if supplied along with access_expiration,
Preston will make authenticated calls to ESI
access_expiration see above
refresh_token if supplied, Preston will use it to get new
access tokens; can be supplied with or without
access_token and access_expiration
Args:
kwargs: various configuration options
"""
BASE_URL = 'https://esi.tech.ccp.is'
SPEC_URL = BASE_URL + '/_{}/swagger.json'
OAUTH_URL = 'https://login.eveonline.com/oauth/'
TOKEN_URL = OAUTH_URL + 'token'
AUTHORIZE_URL = OAUTH_URL + 'authorize'
WHOAMI_URL = OAUTH_URL + 'verify'
METHODS = ['get', 'post', 'put', 'delete']
OPERATION_ID_KEY = 'operationId'
VAR_REPLACE_REGEX = r'{(\w+)}'
def __init__(self, **kwargs: str) -> None:
self.cache = Cache()
self.spec = None
self.version = kwargs.get('version', 'latest')
self.session = requests.Session()
self.session.headers.update({
'User-Agent': kwargs.get('user_agent', ''),
'Accept': 'application/json'
})
self.client_id = kwargs.get('client_id')
self.client_secret = kwargs.get('client_secret')
self.callback_url = kwargs.get('callback_url')
self.scope = kwargs.get('scope', '')
self.access_token = kwargs.get('access_token')
self.access_expiration = kwargs.get('access_expiration')
self.refresh_token = kwargs.get('refresh_token')
self._kwargs = kwargs
if not kwargs.get('no_update_token', False):
self._try_refresh_access_token()
self._update_access_token_header()
def copy(self) -> 'Preston':
"""Creates a copy of this Preston object.
The returned instance is not connected to this, so you can set
whichever headers or other data you want without impacting this instance.
The configuration of the returned instance will match the (original)
configuration of this instance - the kwargs are reused.
Args:
None
Returns:
new Preston instance
"""
return Preston(**self._kwargs)
def _get_access_from_refresh(self) -> Tuple[str, float]:
"""Uses the stored refresh token to get a new access token.
This method assumes that the refresh token exists.
Args:
None
Returns:
new access token and expiration time (from now)
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
response_data = r.json()
return (response_data['access_token'], response_data['expires_in'])
def _get_authorization_headers(self) -> dict:
"""Constructs and returns the Authorization header for the client app.
Args:
None
Returns:
header dict for communicating with the authorization endpoints
"""
auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')
auth = auth.replace('\n', '').replace(' ', '')
auth = 'Basic {}'.format(auth)
headers = {'Authorization': auth}
return headers
def _try_refresh_access_token(self) -> None:
"""Attempts to get a new access token using the refresh token, if needed.
If the access token is expired and this instance has a stored refresh token,
then the refresh token is in the API call to get a new access token. If
successful, this instance is modified in-place with that new access token.
Args:
None
Returns:
None
"""
if self.refresh_token:
if not self.access_token or self._is_access_token_expired():
self.access_token, self.access_expiration = self._get_access_from_refresh()
self.access_expiration = time.time() + self.access_expiration
def _is_access_token_expired(self) -> bool:
"""Returns true if the stored access token has expired.
Args:
None
Returns:
True if the access token is expired
"""
return time.time() > self.access_expiration
def get_authorize_url(self) -> str:
"""Constructs and returns the authorization URL.
This is the URL that a user will have to navigate to in their browser
and complete the login and authorization flow. Upon completion, they
will be redirected to your app's callback URL.
Args:
None
Returns:
URL
"""
return (
f'{self.AUTHORIZE_URL}?response_type=code&redirect_uri={self.callback_url}'
f'&client_id={self.client_id}&scope={self.scope}'
)
def authenticate(self, code: str) -> 'Preston':
"""Authenticates using the code from the EVE SSO.
A new Preston object is returned; this object is not modified.
The intended usage is:
auth = preston.authenticate('some_code_here')
Args:
code: SSO code
Returns:
new Preston, authenticated
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'authorization_code',
'code': code
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
if not r.status_code == 200:
raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')
new_kwargs = dict(self._kwargs)
response_data = r.json()
new_kwargs['access_token'] = response_data['access_token']
new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
new_kwargs['refresh_token'] = response_data['refresh_token']
return Preston(**new_kwargs)
def _update_access_token_header(self) -> None:
"""Updates the requests session with the access token header.
This method does nothing if this instance does not have a
stored access token.
Args:
None
Returns:
None
"""
if self.access_token:
self.session.headers.update({
'Authorization': f'Bearer {self.access_token}'
})
def _get_path_for_op_id(self, id: str) -> Optional[str]:
"""Searches the spec for a path matching the operation id.
Args:
id: operation id
Returns:
path to the endpoint, or None if not found
"""
for path_key, path_value in self._get_spec()['paths'].items():
for method in self.METHODS:
if method in path_value:
if self.OPERATION_ID_KEY in path_value[method]:
if path_value[method][self.OPERATION_ID_KEY] == id:
return path_key
return None
def _insert_vars(self, path: str, data: dict) -> str:
"""Inserts variables into the ESI URL path.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
path with variables filled
"""
data = data.copy()
while True:
match = re.search(self.VAR_REPLACE_REGEX, path)
if not match:
return path
replace_from = match.group(0)
replace_with = str(data.get(match.group(1)))
path = path.replace(replace_from, replace_with)
def whoami(self) -> dict:
"""Returns the basic information about the authenticated character.
Obviously doesn't do anything if this Preston instance is not
authenticated, so it returns an empty dict.
Args:
None
Returns:
character info if authenticated, otherwise an empty dict
"""
if not self.access_token:
return {}
self._try_refresh_access_token()
return self.session.get(self.WHOAMI_URL).json()
def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:
"""Queries the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
ESI data
"""
path = self._insert_vars(path, data)
path = self.BASE_URL + path
data = self.cache.check(path)
if data:
return data
self._try_refresh_access_token()
r = self.session.get(path)
self.cache.set(r)
return r.json()
def get_op(self, id: str, **kwargs: str) -> dict:
"""Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.get_path(path, kwargs)
def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._insert_vars(path, path_data or {})
path = self.BASE_URL + path
self._try_refresh_access_token()
return self.session.post(path, json=post_data).json()
def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by looking up an operation id.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.post_path(path, path_data, post_data)
|
Celeo/Preston
|
preston/preston.py
|
Preston._get_path_for_op_id
|
python
|
def _get_path_for_op_id(self, id: str) -> Optional[str]:
for path_key, path_value in self._get_spec()['paths'].items():
for method in self.METHODS:
if method in path_value:
if self.OPERATION_ID_KEY in path_value[method]:
if path_value[method][self.OPERATION_ID_KEY] == id:
return path_key
return None
|
Searches the spec for a path matching the operation id.
Args:
id: operation id
Returns:
path to the endpoint, or None if not found
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L236-L251
|
[
"def _get_spec(self) -> dict:\n \"\"\"Fetches the OpenAPI spec from the server.\n\n If the spec has already been fetched, the cached version is returned instead.\n\n ArgS:\n None\n\n Returns:\n OpenAPI spec data\n \"\"\"\n if self.spec:\n return self.spec\n self.spec = requests.get(self.SPEC_URL.format(self.version)).json()\n return self.spec\n"
] |
class Preston:
"""Preston class.
This class is used to interface with the EVE Online "ESI" API.
The __init__ method only **kwargs instead of a specific
listing of arguments; here's the list of useful key-values:
version version of the spec to load
user_agent user-agent to use
client_id app's client id
client_secret app's client secret
callback_url app's callback url
scope app's scope(s)
access_token if supplied along with access_expiration,
Preston will make authenticated calls to ESI
access_expiration see above
refresh_token if supplied, Preston will use it to get new
access tokens; can be supplied with or without
access_token and access_expiration
Args:
kwargs: various configuration options
"""
BASE_URL = 'https://esi.tech.ccp.is'
SPEC_URL = BASE_URL + '/_{}/swagger.json'
OAUTH_URL = 'https://login.eveonline.com/oauth/'
TOKEN_URL = OAUTH_URL + 'token'
AUTHORIZE_URL = OAUTH_URL + 'authorize'
WHOAMI_URL = OAUTH_URL + 'verify'
METHODS = ['get', 'post', 'put', 'delete']
OPERATION_ID_KEY = 'operationId'
VAR_REPLACE_REGEX = r'{(\w+)}'
def __init__(self, **kwargs: str) -> None:
self.cache = Cache()
self.spec = None
self.version = kwargs.get('version', 'latest')
self.session = requests.Session()
self.session.headers.update({
'User-Agent': kwargs.get('user_agent', ''),
'Accept': 'application/json'
})
self.client_id = kwargs.get('client_id')
self.client_secret = kwargs.get('client_secret')
self.callback_url = kwargs.get('callback_url')
self.scope = kwargs.get('scope', '')
self.access_token = kwargs.get('access_token')
self.access_expiration = kwargs.get('access_expiration')
self.refresh_token = kwargs.get('refresh_token')
self._kwargs = kwargs
if not kwargs.get('no_update_token', False):
self._try_refresh_access_token()
self._update_access_token_header()
def copy(self) -> 'Preston':
"""Creates a copy of this Preston object.
The returned instance is not connected to this, so you can set
whichever headers or other data you want without impacting this instance.
The configuration of the returned instance will match the (original)
configuration of this instance - the kwargs are reused.
Args:
None
Returns:
new Preston instance
"""
return Preston(**self._kwargs)
def _get_access_from_refresh(self) -> Tuple[str, float]:
"""Uses the stored refresh token to get a new access token.
This method assumes that the refresh token exists.
Args:
None
Returns:
new access token and expiration time (from now)
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
response_data = r.json()
return (response_data['access_token'], response_data['expires_in'])
def _get_authorization_headers(self) -> dict:
"""Constructs and returns the Authorization header for the client app.
Args:
None
Returns:
header dict for communicating with the authorization endpoints
"""
auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')
auth = auth.replace('\n', '').replace(' ', '')
auth = 'Basic {}'.format(auth)
headers = {'Authorization': auth}
return headers
def _try_refresh_access_token(self) -> None:
"""Attempts to get a new access token using the refresh token, if needed.
If the access token is expired and this instance has a stored refresh token,
then the refresh token is in the API call to get a new access token. If
successful, this instance is modified in-place with that new access token.
Args:
None
Returns:
None
"""
if self.refresh_token:
if not self.access_token or self._is_access_token_expired():
self.access_token, self.access_expiration = self._get_access_from_refresh()
self.access_expiration = time.time() + self.access_expiration
def _is_access_token_expired(self) -> bool:
"""Returns true if the stored access token has expired.
Args:
None
Returns:
True if the access token is expired
"""
return time.time() > self.access_expiration
def get_authorize_url(self) -> str:
"""Constructs and returns the authorization URL.
This is the URL that a user will have to navigate to in their browser
and complete the login and authorization flow. Upon completion, they
will be redirected to your app's callback URL.
Args:
None
Returns:
URL
"""
return (
f'{self.AUTHORIZE_URL}?response_type=code&redirect_uri={self.callback_url}'
f'&client_id={self.client_id}&scope={self.scope}'
)
def authenticate(self, code: str) -> 'Preston':
"""Authenticates using the code from the EVE SSO.
A new Preston object is returned; this object is not modified.
The intended usage is:
auth = preston.authenticate('some_code_here')
Args:
code: SSO code
Returns:
new Preston, authenticated
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'authorization_code',
'code': code
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
if not r.status_code == 200:
raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')
new_kwargs = dict(self._kwargs)
response_data = r.json()
new_kwargs['access_token'] = response_data['access_token']
new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
new_kwargs['refresh_token'] = response_data['refresh_token']
return Preston(**new_kwargs)
def _update_access_token_header(self) -> None:
"""Updates the requests session with the access token header.
This method does nothing if this instance does not have a
stored access token.
Args:
None
Returns:
None
"""
if self.access_token:
self.session.headers.update({
'Authorization': f'Bearer {self.access_token}'
})
def _get_spec(self) -> dict:
"""Fetches the OpenAPI spec from the server.
If the spec has already been fetched, the cached version is returned instead.
ArgS:
None
Returns:
OpenAPI spec data
"""
if self.spec:
return self.spec
self.spec = requests.get(self.SPEC_URL.format(self.version)).json()
return self.spec
def _insert_vars(self, path: str, data: dict) -> str:
"""Inserts variables into the ESI URL path.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
path with variables filled
"""
data = data.copy()
while True:
match = re.search(self.VAR_REPLACE_REGEX, path)
if not match:
return path
replace_from = match.group(0)
replace_with = str(data.get(match.group(1)))
path = path.replace(replace_from, replace_with)
def whoami(self) -> dict:
"""Returns the basic information about the authenticated character.
Obviously doesn't do anything if this Preston instance is not
authenticated, so it returns an empty dict.
Args:
None
Returns:
character info if authenticated, otherwise an empty dict
"""
if not self.access_token:
return {}
self._try_refresh_access_token()
return self.session.get(self.WHOAMI_URL).json()
def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:
"""Queries the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
ESI data
"""
path = self._insert_vars(path, data)
path = self.BASE_URL + path
data = self.cache.check(path)
if data:
return data
self._try_refresh_access_token()
r = self.session.get(path)
self.cache.set(r)
return r.json()
def get_op(self, id: str, **kwargs: str) -> dict:
"""Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.get_path(path, kwargs)
def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._insert_vars(path, path_data or {})
path = self.BASE_URL + path
self._try_refresh_access_token()
return self.session.post(path, json=post_data).json()
def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by looking up an operation id.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.post_path(path, path_data, post_data)
|
Celeo/Preston
|
preston/preston.py
|
Preston._insert_vars
|
python
|
def _insert_vars(self, path: str, data: dict) -> str:
data = data.copy()
while True:
match = re.search(self.VAR_REPLACE_REGEX, path)
if not match:
return path
replace_from = match.group(0)
replace_with = str(data.get(match.group(1)))
path = path.replace(replace_from, replace_with)
|
Inserts variables into the ESI URL path.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
path with variables filled
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L253-L270
| null |
class Preston:
"""Preston class.
This class is used to interface with the EVE Online "ESI" API.
The __init__ method only **kwargs instead of a specific
listing of arguments; here's the list of useful key-values:
version version of the spec to load
user_agent user-agent to use
client_id app's client id
client_secret app's client secret
callback_url app's callback url
scope app's scope(s)
access_token if supplied along with access_expiration,
Preston will make authenticated calls to ESI
access_expiration see above
refresh_token if supplied, Preston will use it to get new
access tokens; can be supplied with or without
access_token and access_expiration
Args:
kwargs: various configuration options
"""
BASE_URL = 'https://esi.tech.ccp.is'
SPEC_URL = BASE_URL + '/_{}/swagger.json'
OAUTH_URL = 'https://login.eveonline.com/oauth/'
TOKEN_URL = OAUTH_URL + 'token'
AUTHORIZE_URL = OAUTH_URL + 'authorize'
WHOAMI_URL = OAUTH_URL + 'verify'
METHODS = ['get', 'post', 'put', 'delete']
OPERATION_ID_KEY = 'operationId'
VAR_REPLACE_REGEX = r'{(\w+)}'
def __init__(self, **kwargs: str) -> None:
self.cache = Cache()
self.spec = None
self.version = kwargs.get('version', 'latest')
self.session = requests.Session()
self.session.headers.update({
'User-Agent': kwargs.get('user_agent', ''),
'Accept': 'application/json'
})
self.client_id = kwargs.get('client_id')
self.client_secret = kwargs.get('client_secret')
self.callback_url = kwargs.get('callback_url')
self.scope = kwargs.get('scope', '')
self.access_token = kwargs.get('access_token')
self.access_expiration = kwargs.get('access_expiration')
self.refresh_token = kwargs.get('refresh_token')
self._kwargs = kwargs
if not kwargs.get('no_update_token', False):
self._try_refresh_access_token()
self._update_access_token_header()
def copy(self) -> 'Preston':
"""Creates a copy of this Preston object.
The returned instance is not connected to this, so you can set
whichever headers or other data you want without impacting this instance.
The configuration of the returned instance will match the (original)
configuration of this instance - the kwargs are reused.
Args:
None
Returns:
new Preston instance
"""
return Preston(**self._kwargs)
def _get_access_from_refresh(self) -> Tuple[str, float]:
"""Uses the stored refresh token to get a new access token.
This method assumes that the refresh token exists.
Args:
None
Returns:
new access token and expiration time (from now)
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
response_data = r.json()
return (response_data['access_token'], response_data['expires_in'])
def _get_authorization_headers(self) -> dict:
"""Constructs and returns the Authorization header for the client app.
Args:
None
Returns:
header dict for communicating with the authorization endpoints
"""
auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')
auth = auth.replace('\n', '').replace(' ', '')
auth = 'Basic {}'.format(auth)
headers = {'Authorization': auth}
return headers
def _try_refresh_access_token(self) -> None:
"""Attempts to get a new access token using the refresh token, if needed.
If the access token is expired and this instance has a stored refresh token,
then the refresh token is in the API call to get a new access token. If
successful, this instance is modified in-place with that new access token.
Args:
None
Returns:
None
"""
if self.refresh_token:
if not self.access_token or self._is_access_token_expired():
self.access_token, self.access_expiration = self._get_access_from_refresh()
self.access_expiration = time.time() + self.access_expiration
def _is_access_token_expired(self) -> bool:
"""Returns true if the stored access token has expired.
Args:
None
Returns:
True if the access token is expired
"""
return time.time() > self.access_expiration
def get_authorize_url(self) -> str:
"""Constructs and returns the authorization URL.
This is the URL that a user will have to navigate to in their browser
and complete the login and authorization flow. Upon completion, they
will be redirected to your app's callback URL.
Args:
None
Returns:
URL
"""
return (
f'{self.AUTHORIZE_URL}?response_type=code&redirect_uri={self.callback_url}'
f'&client_id={self.client_id}&scope={self.scope}'
)
def authenticate(self, code: str) -> 'Preston':
"""Authenticates using the code from the EVE SSO.
A new Preston object is returned; this object is not modified.
The intended usage is:
auth = preston.authenticate('some_code_here')
Args:
code: SSO code
Returns:
new Preston, authenticated
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'authorization_code',
'code': code
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
if not r.status_code == 200:
raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')
new_kwargs = dict(self._kwargs)
response_data = r.json()
new_kwargs['access_token'] = response_data['access_token']
new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
new_kwargs['refresh_token'] = response_data['refresh_token']
return Preston(**new_kwargs)
def _update_access_token_header(self) -> None:
"""Updates the requests session with the access token header.
This method does nothing if this instance does not have a
stored access token.
Args:
None
Returns:
None
"""
if self.access_token:
self.session.headers.update({
'Authorization': f'Bearer {self.access_token}'
})
def _get_spec(self) -> dict:
"""Fetches the OpenAPI spec from the server.
If the spec has already been fetched, the cached version is returned instead.
ArgS:
None
Returns:
OpenAPI spec data
"""
if self.spec:
return self.spec
self.spec = requests.get(self.SPEC_URL.format(self.version)).json()
return self.spec
def _get_path_for_op_id(self, id: str) -> Optional[str]:
"""Searches the spec for a path matching the operation id.
Args:
id: operation id
Returns:
path to the endpoint, or None if not found
"""
for path_key, path_value in self._get_spec()['paths'].items():
for method in self.METHODS:
if method in path_value:
if self.OPERATION_ID_KEY in path_value[method]:
if path_value[method][self.OPERATION_ID_KEY] == id:
return path_key
return None
def whoami(self) -> dict:
"""Returns the basic information about the authenticated character.
Obviously doesn't do anything if this Preston instance is not
authenticated, so it returns an empty dict.
Args:
None
Returns:
character info if authenticated, otherwise an empty dict
"""
if not self.access_token:
return {}
self._try_refresh_access_token()
return self.session.get(self.WHOAMI_URL).json()
def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:
"""Queries the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
ESI data
"""
path = self._insert_vars(path, data)
path = self.BASE_URL + path
data = self.cache.check(path)
if data:
return data
self._try_refresh_access_token()
r = self.session.get(path)
self.cache.set(r)
return r.json()
def get_op(self, id: str, **kwargs: str) -> dict:
"""Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.get_path(path, kwargs)
def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._insert_vars(path, path_data or {})
path = self.BASE_URL + path
self._try_refresh_access_token()
return self.session.post(path, json=post_data).json()
def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by looking up an operation id.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.post_path(path, path_data, post_data)
|
Celeo/Preston
|
preston/preston.py
|
Preston.whoami
|
python
|
def whoami(self) -> dict:
if not self.access_token:
return {}
self._try_refresh_access_token()
return self.session.get(self.WHOAMI_URL).json()
|
Returns the basic information about the authenticated character.
Obviously doesn't do anything if this Preston instance is not
authenticated, so it returns an empty dict.
Args:
None
Returns:
character info if authenticated, otherwise an empty dict
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L272-L287
|
[
"def _try_refresh_access_token(self) -> None:\n \"\"\"Attempts to get a new access token using the refresh token, if needed.\n\n If the access token is expired and this instance has a stored refresh token,\n then the refresh token is in the API call to get a new access token. If\n successful, this instance is modified in-place with that new access token.\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n if self.refresh_token:\n if not self.access_token or self._is_access_token_expired():\n self.access_token, self.access_expiration = self._get_access_from_refresh()\n self.access_expiration = time.time() + self.access_expiration\n"
] |
class Preston:
"""Preston class.
This class is used to interface with the EVE Online "ESI" API.
The __init__ method only **kwargs instead of a specific
listing of arguments; here's the list of useful key-values:
version version of the spec to load
user_agent user-agent to use
client_id app's client id
client_secret app's client secret
callback_url app's callback url
scope app's scope(s)
access_token if supplied along with access_expiration,
Preston will make authenticated calls to ESI
access_expiration see above
refresh_token if supplied, Preston will use it to get new
access tokens; can be supplied with or without
access_token and access_expiration
Args:
kwargs: various configuration options
"""
BASE_URL = 'https://esi.tech.ccp.is'
SPEC_URL = BASE_URL + '/_{}/swagger.json'
OAUTH_URL = 'https://login.eveonline.com/oauth/'
TOKEN_URL = OAUTH_URL + 'token'
AUTHORIZE_URL = OAUTH_URL + 'authorize'
WHOAMI_URL = OAUTH_URL + 'verify'
METHODS = ['get', 'post', 'put', 'delete']
OPERATION_ID_KEY = 'operationId'
VAR_REPLACE_REGEX = r'{(\w+)}'
def __init__(self, **kwargs: str) -> None:
self.cache = Cache()
self.spec = None
self.version = kwargs.get('version', 'latest')
self.session = requests.Session()
self.session.headers.update({
'User-Agent': kwargs.get('user_agent', ''),
'Accept': 'application/json'
})
self.client_id = kwargs.get('client_id')
self.client_secret = kwargs.get('client_secret')
self.callback_url = kwargs.get('callback_url')
self.scope = kwargs.get('scope', '')
self.access_token = kwargs.get('access_token')
self.access_expiration = kwargs.get('access_expiration')
self.refresh_token = kwargs.get('refresh_token')
self._kwargs = kwargs
if not kwargs.get('no_update_token', False):
self._try_refresh_access_token()
self._update_access_token_header()
def copy(self) -> 'Preston':
"""Creates a copy of this Preston object.
The returned instance is not connected to this, so you can set
whichever headers or other data you want without impacting this instance.
The configuration of the returned instance will match the (original)
configuration of this instance - the kwargs are reused.
Args:
None
Returns:
new Preston instance
"""
return Preston(**self._kwargs)
def _get_access_from_refresh(self) -> Tuple[str, float]:
"""Uses the stored refresh token to get a new access token.
This method assumes that the refresh token exists.
Args:
None
Returns:
new access token and expiration time (from now)
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
response_data = r.json()
return (response_data['access_token'], response_data['expires_in'])
def _get_authorization_headers(self) -> dict:
"""Constructs and returns the Authorization header for the client app.
Args:
None
Returns:
header dict for communicating with the authorization endpoints
"""
auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')
auth = auth.replace('\n', '').replace(' ', '')
auth = 'Basic {}'.format(auth)
headers = {'Authorization': auth}
return headers
def _try_refresh_access_token(self) -> None:
"""Attempts to get a new access token using the refresh token, if needed.
If the access token is expired and this instance has a stored refresh token,
then the refresh token is in the API call to get a new access token. If
successful, this instance is modified in-place with that new access token.
Args:
None
Returns:
None
"""
if self.refresh_token:
if not self.access_token or self._is_access_token_expired():
self.access_token, self.access_expiration = self._get_access_from_refresh()
self.access_expiration = time.time() + self.access_expiration
def _is_access_token_expired(self) -> bool:
"""Returns true if the stored access token has expired.
Args:
None
Returns:
True if the access token is expired
"""
return time.time() > self.access_expiration
def get_authorize_url(self) -> str:
"""Constructs and returns the authorization URL.
This is the URL that a user will have to navigate to in their browser
and complete the login and authorization flow. Upon completion, they
will be redirected to your app's callback URL.
Args:
None
Returns:
URL
"""
return (
f'{self.AUTHORIZE_URL}?response_type=code&redirect_uri={self.callback_url}'
f'&client_id={self.client_id}&scope={self.scope}'
)
def authenticate(self, code: str) -> 'Preston':
"""Authenticates using the code from the EVE SSO.
A new Preston object is returned; this object is not modified.
The intended usage is:
auth = preston.authenticate('some_code_here')
Args:
code: SSO code
Returns:
new Preston, authenticated
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'authorization_code',
'code': code
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
if not r.status_code == 200:
raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')
new_kwargs = dict(self._kwargs)
response_data = r.json()
new_kwargs['access_token'] = response_data['access_token']
new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
new_kwargs['refresh_token'] = response_data['refresh_token']
return Preston(**new_kwargs)
def _update_access_token_header(self) -> None:
"""Updates the requests session with the access token header.
This method does nothing if this instance does not have a
stored access token.
Args:
None
Returns:
None
"""
if self.access_token:
self.session.headers.update({
'Authorization': f'Bearer {self.access_token}'
})
def _get_spec(self) -> dict:
"""Fetches the OpenAPI spec from the server.
If the spec has already been fetched, the cached version is returned instead.
ArgS:
None
Returns:
OpenAPI spec data
"""
if self.spec:
return self.spec
self.spec = requests.get(self.SPEC_URL.format(self.version)).json()
return self.spec
def _get_path_for_op_id(self, id: str) -> Optional[str]:
"""Searches the spec for a path matching the operation id.
Args:
id: operation id
Returns:
path to the endpoint, or None if not found
"""
for path_key, path_value in self._get_spec()['paths'].items():
for method in self.METHODS:
if method in path_value:
if self.OPERATION_ID_KEY in path_value[method]:
if path_value[method][self.OPERATION_ID_KEY] == id:
return path_key
return None
def _insert_vars(self, path: str, data: dict) -> str:
"""Inserts variables into the ESI URL path.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
path with variables filled
"""
data = data.copy()
while True:
match = re.search(self.VAR_REPLACE_REGEX, path)
if not match:
return path
replace_from = match.group(0)
replace_with = str(data.get(match.group(1)))
path = path.replace(replace_from, replace_with)
def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:
"""Queries the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
ESI data
"""
path = self._insert_vars(path, data)
path = self.BASE_URL + path
data = self.cache.check(path)
if data:
return data
self._try_refresh_access_token()
r = self.session.get(path)
self.cache.set(r)
return r.json()
def get_op(self, id: str, **kwargs: str) -> dict:
"""Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.get_path(path, kwargs)
def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._insert_vars(path, path_data or {})
path = self.BASE_URL + path
self._try_refresh_access_token()
return self.session.post(path, json=post_data).json()
def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by looking up an operation id.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.post_path(path, path_data, post_data)
|
Celeo/Preston
|
preston/preston.py
|
Preston.get_path
|
python
|
def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:
path = self._insert_vars(path, data)
path = self.BASE_URL + path
data = self.cache.check(path)
if data:
return data
self._try_refresh_access_token()
r = self.session.get(path)
self.cache.set(r)
return r.json()
|
Queries the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
ESI data
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L289-L311
|
[
"def _try_refresh_access_token(self) -> None:\n \"\"\"Attempts to get a new access token using the refresh token, if needed.\n\n If the access token is expired and this instance has a stored refresh token,\n then the refresh token is in the API call to get a new access token. If\n successful, this instance is modified in-place with that new access token.\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n if self.refresh_token:\n if not self.access_token or self._is_access_token_expired():\n self.access_token, self.access_expiration = self._get_access_from_refresh()\n self.access_expiration = time.time() + self.access_expiration\n",
"def _insert_vars(self, path: str, data: dict) -> str:\n \"\"\"Inserts variables into the ESI URL path.\n\n Args:\n path: raw ESI URL path\n data: data to insert into the URL\n\n Returns:\n path with variables filled\n \"\"\"\n data = data.copy()\n while True:\n match = re.search(self.VAR_REPLACE_REGEX, path)\n if not match:\n return path\n replace_from = match.group(0)\n replace_with = str(data.get(match.group(1)))\n path = path.replace(replace_from, replace_with)\n"
] |
class Preston:
"""Preston class.
This class is used to interface with the EVE Online "ESI" API.
The __init__ method only **kwargs instead of a specific
listing of arguments; here's the list of useful key-values:
version version of the spec to load
user_agent user-agent to use
client_id app's client id
client_secret app's client secret
callback_url app's callback url
scope app's scope(s)
access_token if supplied along with access_expiration,
Preston will make authenticated calls to ESI
access_expiration see above
refresh_token if supplied, Preston will use it to get new
access tokens; can be supplied with or without
access_token and access_expiration
Args:
kwargs: various configuration options
"""
BASE_URL = 'https://esi.tech.ccp.is'
SPEC_URL = BASE_URL + '/_{}/swagger.json'
OAUTH_URL = 'https://login.eveonline.com/oauth/'
TOKEN_URL = OAUTH_URL + 'token'
AUTHORIZE_URL = OAUTH_URL + 'authorize'
WHOAMI_URL = OAUTH_URL + 'verify'
METHODS = ['get', 'post', 'put', 'delete']
OPERATION_ID_KEY = 'operationId'
VAR_REPLACE_REGEX = r'{(\w+)}'
def __init__(self, **kwargs: str) -> None:
self.cache = Cache()
self.spec = None
self.version = kwargs.get('version', 'latest')
self.session = requests.Session()
self.session.headers.update({
'User-Agent': kwargs.get('user_agent', ''),
'Accept': 'application/json'
})
self.client_id = kwargs.get('client_id')
self.client_secret = kwargs.get('client_secret')
self.callback_url = kwargs.get('callback_url')
self.scope = kwargs.get('scope', '')
self.access_token = kwargs.get('access_token')
self.access_expiration = kwargs.get('access_expiration')
self.refresh_token = kwargs.get('refresh_token')
self._kwargs = kwargs
if not kwargs.get('no_update_token', False):
self._try_refresh_access_token()
self._update_access_token_header()
def copy(self) -> 'Preston':
"""Creates a copy of this Preston object.
The returned instance is not connected to this, so you can set
whichever headers or other data you want without impacting this instance.
The configuration of the returned instance will match the (original)
configuration of this instance - the kwargs are reused.
Args:
None
Returns:
new Preston instance
"""
return Preston(**self._kwargs)
def _get_access_from_refresh(self) -> Tuple[str, float]:
"""Uses the stored refresh token to get a new access token.
This method assumes that the refresh token exists.
Args:
None
Returns:
new access token and expiration time (from now)
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
response_data = r.json()
return (response_data['access_token'], response_data['expires_in'])
def _get_authorization_headers(self) -> dict:
"""Constructs and returns the Authorization header for the client app.
Args:
None
Returns:
header dict for communicating with the authorization endpoints
"""
auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')
auth = auth.replace('\n', '').replace(' ', '')
auth = 'Basic {}'.format(auth)
headers = {'Authorization': auth}
return headers
def _try_refresh_access_token(self) -> None:
"""Attempts to get a new access token using the refresh token, if needed.
If the access token is expired and this instance has a stored refresh token,
then the refresh token is in the API call to get a new access token. If
successful, this instance is modified in-place with that new access token.
Args:
None
Returns:
None
"""
if self.refresh_token:
if not self.access_token or self._is_access_token_expired():
self.access_token, self.access_expiration = self._get_access_from_refresh()
self.access_expiration = time.time() + self.access_expiration
def _is_access_token_expired(self) -> bool:
"""Returns true if the stored access token has expired.
Args:
None
Returns:
True if the access token is expired
"""
return time.time() > self.access_expiration
def get_authorize_url(self) -> str:
"""Constructs and returns the authorization URL.
This is the URL that a user will have to navigate to in their browser
and complete the login and authorization flow. Upon completion, they
will be redirected to your app's callback URL.
Args:
None
Returns:
URL
"""
return (
f'{self.AUTHORIZE_URL}?response_type=code&redirect_uri={self.callback_url}'
f'&client_id={self.client_id}&scope={self.scope}'
)
def authenticate(self, code: str) -> 'Preston':
"""Authenticates using the code from the EVE SSO.
A new Preston object is returned; this object is not modified.
The intended usage is:
auth = preston.authenticate('some_code_here')
Args:
code: SSO code
Returns:
new Preston, authenticated
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'authorization_code',
'code': code
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
if not r.status_code == 200:
raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')
new_kwargs = dict(self._kwargs)
response_data = r.json()
new_kwargs['access_token'] = response_data['access_token']
new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
new_kwargs['refresh_token'] = response_data['refresh_token']
return Preston(**new_kwargs)
def _update_access_token_header(self) -> None:
"""Updates the requests session with the access token header.
This method does nothing if this instance does not have a
stored access token.
Args:
None
Returns:
None
"""
if self.access_token:
self.session.headers.update({
'Authorization': f'Bearer {self.access_token}'
})
def _get_spec(self) -> dict:
"""Fetches the OpenAPI spec from the server.
If the spec has already been fetched, the cached version is returned instead.
ArgS:
None
Returns:
OpenAPI spec data
"""
if self.spec:
return self.spec
self.spec = requests.get(self.SPEC_URL.format(self.version)).json()
return self.spec
def _get_path_for_op_id(self, id: str) -> Optional[str]:
"""Searches the spec for a path matching the operation id.
Args:
id: operation id
Returns:
path to the endpoint, or None if not found
"""
for path_key, path_value in self._get_spec()['paths'].items():
for method in self.METHODS:
if method in path_value:
if self.OPERATION_ID_KEY in path_value[method]:
if path_value[method][self.OPERATION_ID_KEY] == id:
return path_key
return None
def _insert_vars(self, path: str, data: dict) -> str:
"""Inserts variables into the ESI URL path.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
path with variables filled
"""
data = data.copy()
while True:
match = re.search(self.VAR_REPLACE_REGEX, path)
if not match:
return path
replace_from = match.group(0)
replace_with = str(data.get(match.group(1)))
path = path.replace(replace_from, replace_with)
def whoami(self) -> dict:
"""Returns the basic information about the authenticated character.
Obviously doesn't do anything if this Preston instance is not
authenticated, so it returns an empty dict.
Args:
None
Returns:
character info if authenticated, otherwise an empty dict
"""
if not self.access_token:
return {}
self._try_refresh_access_token()
return self.session.get(self.WHOAMI_URL).json()
def get_op(self, id: str, **kwargs: str) -> dict:
"""Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.get_path(path, kwargs)
def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._insert_vars(path, path_data or {})
path = self.BASE_URL + path
self._try_refresh_access_token()
return self.session.post(path, json=post_data).json()
def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by looking up an operation id.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.post_path(path, path_data, post_data)
|
Celeo/Preston
|
preston/preston.py
|
Preston.get_op
|
python
|
def get_op(self, id: str, **kwargs: str) -> dict:
path = self._get_path_for_op_id(id)
return self.get_path(path, kwargs)
|
Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L313-L328
|
[
"def _get_path_for_op_id(self, id: str) -> Optional[str]:\n \"\"\"Searches the spec for a path matching the operation id.\n\n Args:\n id: operation id\n\n Returns:\n path to the endpoint, or None if not found\n \"\"\"\n for path_key, path_value in self._get_spec()['paths'].items():\n for method in self.METHODS:\n if method in path_value:\n if self.OPERATION_ID_KEY in path_value[method]:\n if path_value[method][self.OPERATION_ID_KEY] == id:\n return path_key\n return None\n",
"def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:\n \"\"\"Queries the ESI by an endpoint URL.\n\n This method is not marked \"private\" as it _can_ be used\n by consuming code, but it's probably easier to call the\n `get_op` method instead.\n\n Args:\n path: raw ESI URL path\n data: data to insert into the URL\n\n Returns:\n ESI data\n \"\"\"\n path = self._insert_vars(path, data)\n path = self.BASE_URL + path\n data = self.cache.check(path)\n if data:\n return data\n self._try_refresh_access_token()\n r = self.session.get(path)\n self.cache.set(r)\n return r.json()\n"
] |
class Preston:
    """Preston class.

    This class is used to interface with the EVE Online "ESI" API.

    The __init__ method takes only **kwargs instead of a specific
    listing of arguments; here's the list of useful key-values:

        version             version of the spec to load
        user_agent          user-agent to use
        client_id           app's client id
        client_secret       app's client secret
        callback_url        app's callback url
        scope               app's scope(s)
        access_token        if supplied along with access_expiration,
                            Preston will make authenticated calls to ESI
        access_expiration   see above
        refresh_token       if supplied, Preston will use it to get new
                            access tokens; can be supplied with or without
                            access_token and access_expiration
        no_update_token     if truthy, skip the token refresh normally
                            done during construction

    Args:
        kwargs: various configuration options
    """

    BASE_URL = 'https://esi.tech.ccp.is'
    SPEC_URL = BASE_URL + '/_{}/swagger.json'
    OAUTH_URL = 'https://login.eveonline.com/oauth/'
    TOKEN_URL = OAUTH_URL + 'token'
    AUTHORIZE_URL = OAUTH_URL + 'authorize'
    WHOAMI_URL = OAUTH_URL + 'verify'
    METHODS = ['get', 'post', 'put', 'delete']
    OPERATION_ID_KEY = 'operationId'
    VAR_REPLACE_REGEX = r'{(\w+)}'

    def __init__(self, **kwargs: str) -> None:
        self.cache = Cache()
        self.spec = None
        self.version = kwargs.get('version', 'latest')
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': kwargs.get('user_agent', ''),
            'Accept': 'application/json'
        })
        self.client_id = kwargs.get('client_id')
        self.client_secret = kwargs.get('client_secret')
        self.callback_url = kwargs.get('callback_url')
        self.scope = kwargs.get('scope', '')
        self.access_token = kwargs.get('access_token')
        self.access_expiration = kwargs.get('access_expiration')
        self.refresh_token = kwargs.get('refresh_token')
        # Keep the raw kwargs so copy()/authenticate() can clone this config.
        self._kwargs = kwargs
        if not kwargs.get('no_update_token', False):
            self._try_refresh_access_token()
            self._update_access_token_header()

    def copy(self) -> 'Preston':
        """Creates a copy of this Preston object.

        The returned instance is not connected to this, so you can set
        whichever headers or other data you want without impacting this
        instance. The configuration of the returned instance will match
        the (original) configuration of this instance - the kwargs are
        reused.

        Args:
            None

        Returns:
            new Preston instance
        """
        return Preston(**self._kwargs)

    def _get_access_from_refresh(self) -> Tuple[str, float]:
        """Uses the stored refresh token to get a new access token.

        This method assumes that the refresh token exists.

        Args:
            None

        Returns:
            new access token and expiration time (seconds from now)
        """
        headers = self._get_authorization_headers()
        data = {
            'grant_type': 'refresh_token',
            'refresh_token': self.refresh_token
        }
        r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
        response_data = r.json()
        return (response_data['access_token'], response_data['expires_in'])

    def _get_authorization_headers(self) -> dict:
        """Constructs and returns the Authorization header for the client app.

        Args:
            None

        Returns:
            header dict for communicating with the authorization endpoints
        """
        # base64.encodestring was deprecated and removed in Python 3.9;
        # b64encode also never inserts newlines, so no cleanup is needed.
        auth = base64.b64encode(
            (self.client_id + ':' + self.client_secret).encode('latin-1')
        ).decode('latin-1')
        return {'Authorization': 'Basic {}'.format(auth)}

    def _try_refresh_access_token(self) -> None:
        """Attempts to get a new access token using the refresh token, if needed.

        If the access token is expired and this instance has a stored refresh
        token, then the refresh token is used in the API call to get a new
        access token. If successful, this instance is modified in-place with
        that new access token.

        Args:
            None

        Returns:
            None
        """
        if self.refresh_token:
            if not self.access_token or self._is_access_token_expired():
                self.access_token, self.access_expiration = self._get_access_from_refresh()
                # Convert the "seconds from now" TTL into an absolute epoch time.
                self.access_expiration = time.time() + self.access_expiration

    def _is_access_token_expired(self) -> bool:
        """Returns true if the stored access token has expired.

        Args:
            None

        Returns:
            True if the access token is expired
        """
        return time.time() > self.access_expiration

    def get_authorize_url(self) -> str:
        """Constructs and returns the authorization URL.

        This is the URL that a user will have to navigate to in their browser
        and complete the login and authorization flow. Upon completion, they
        will be redirected to your app's callback URL.

        Args:
            None

        Returns:
            URL
        """
        return (
            f'{self.AUTHORIZE_URL}?response_type=code&redirect_uri={self.callback_url}'
            f'&client_id={self.client_id}&scope={self.scope}'
        )

    def authenticate(self, code: str) -> 'Preston':
        """Authenticates using the code from the EVE SSO.

        A new Preston object is returned; this object is not modified.

        The intended usage is:

            auth = preston.authenticate('some_code_here')

        Args:
            code: SSO code

        Returns:
            new Preston, authenticated

        Raises:
            Exception: if the token endpoint does not return HTTP 200
        """
        headers = self._get_authorization_headers()
        data = {
            'grant_type': 'authorization_code',
            'code': code
        }
        r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
        if r.status_code != 200:
            # Fixed typo in the original message ("repsonse").
            raise Exception(f'Could not authenticate, got response code {r.status_code}')
        new_kwargs = dict(self._kwargs)
        response_data = r.json()
        new_kwargs['access_token'] = response_data['access_token']
        new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
        new_kwargs['refresh_token'] = response_data['refresh_token']
        return Preston(**new_kwargs)

    def _update_access_token_header(self) -> None:
        """Updates the requests session with the access token header.

        This method does nothing if this instance does not have a
        stored access token.

        Args:
            None

        Returns:
            None
        """
        if self.access_token:
            self.session.headers.update({
                'Authorization': f'Bearer {self.access_token}'
            })

    def _get_spec(self) -> dict:
        """Fetches the OpenAPI spec from the server.

        If the spec has already been fetched, the cached version is
        returned instead.

        Args:
            None

        Returns:
            OpenAPI spec data
        """
        if self.spec:
            return self.spec
        self.spec = requests.get(self.SPEC_URL.format(self.version)).json()
        return self.spec

    def _get_path_for_op_id(self, id: str) -> Optional[str]:
        """Searches the spec for a path matching the operation id.

        Args:
            id: operation id

        Returns:
            path to the endpoint, or None if not found
        """
        for path_key, path_value in self._get_spec()['paths'].items():
            for method in self.METHODS:
                if method in path_value:
                    if self.OPERATION_ID_KEY in path_value[method]:
                        if path_value[method][self.OPERATION_ID_KEY] == id:
                            return path_key
        return None

    def _insert_vars(self, path: str, data: dict) -> str:
        """Inserts variables into the ESI URL path.

        Args:
            path: raw ESI URL path
            data: data to insert into the URL

        Returns:
            path with variables filled
        """
        data = data.copy()
        while True:
            match = re.search(self.VAR_REPLACE_REGEX, path)
            if not match:
                return path
            replace_from = match.group(0)
            replace_with = str(data.get(match.group(1)))
            path = path.replace(replace_from, replace_with)

    def whoami(self) -> dict:
        """Returns the basic information about the authenticated character.

        Obviously doesn't do anything if this Preston instance is not
        authenticated, so it returns an empty dict.

        Args:
            None

        Returns:
            character info if authenticated, otherwise an empty dict
        """
        if not self.access_token:
            return {}
        self._try_refresh_access_token()
        return self.session.get(self.WHOAMI_URL).json()

    def get_path(self, path: str, data: dict) -> dict:
        """Queries the ESI by an endpoint URL.

        This method is not marked "private" as it _can_ be used
        by consuming code, but it's probably easier to call the
        `get_op` method instead.

        Args:
            path: raw ESI URL path
            data: data to insert into the URL

        Returns:
            ESI data
        """
        path = self._insert_vars(path, data)
        path = self.BASE_URL + path
        # Fixed return annotation (a single dict is returned, not a tuple)
        # and stopped reusing the `data` parameter for the cached value.
        cached = self.cache.check(path)
        if cached:
            return cached
        self._try_refresh_access_token()
        r = self.session.get(path)
        self.cache.set(r)
        return r.json()

    def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:
        """Modifies the ESI by an endpoint URL.

        This method is not marked "private" as it _can_ be used
        by consuming code, but it's probably easier to call the
        `post_op` method instead.

        Args:
            path: raw ESI URL path
            path_data: data to format the path with (can be None)
            post_data: data to send to ESI

        Returns:
            ESI data
        """
        path = self._insert_vars(path, path_data or {})
        path = self.BASE_URL + path
        self._try_refresh_access_token()
        return self.session.post(path, json=post_data).json()

    def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
        """Modifies the ESI by looking up an operation id.

        Args:
            id: operation id
            path_data: data to format the path with (can be None)
            post_data: data to send to ESI

        Returns:
            ESI data
        """
        path = self._get_path_for_op_id(id)
        return self.post_path(path, path_data, post_data)
|
Celeo/Preston
|
preston/preston.py
|
Preston.post_path
|
python
|
def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:
path = self._insert_vars(path, path_data or {})
path = self.BASE_URL + path
self._try_refresh_access_token()
return self.session.post(path, json=post_data).json()
|
Modifies the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L330-L348
|
[
"def _try_refresh_access_token(self) -> None:\n \"\"\"Attempts to get a new access token using the refresh token, if needed.\n\n If the access token is expired and this instance has a stored refresh token,\n then the refresh token is in the API call to get a new access token. If\n successful, this instance is modified in-place with that new access token.\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n if self.refresh_token:\n if not self.access_token or self._is_access_token_expired():\n self.access_token, self.access_expiration = self._get_access_from_refresh()\n self.access_expiration = time.time() + self.access_expiration\n",
"def _insert_vars(self, path: str, data: dict) -> str:\n \"\"\"Inserts variables into the ESI URL path.\n\n Args:\n path: raw ESI URL path\n data: data to insert into the URL\n\n Returns:\n path with variables filled\n \"\"\"\n data = data.copy()\n while True:\n match = re.search(self.VAR_REPLACE_REGEX, path)\n if not match:\n return path\n replace_from = match.group(0)\n replace_with = str(data.get(match.group(1)))\n path = path.replace(replace_from, replace_with)\n"
] |
class Preston:
"""Preston class.
This class is used to interface with the EVE Online "ESI" API.
The __init__ method only **kwargs instead of a specific
listing of arguments; here's the list of useful key-values:
version version of the spec to load
user_agent user-agent to use
client_id app's client id
client_secret app's client secret
callback_url app's callback url
scope app's scope(s)
access_token if supplied along with access_expiration,
Preston will make authenticated calls to ESI
access_expiration see above
refresh_token if supplied, Preston will use it to get new
access tokens; can be supplied with or without
access_token and access_expiration
Args:
kwargs: various configuration options
"""
BASE_URL = 'https://esi.tech.ccp.is'
SPEC_URL = BASE_URL + '/_{}/swagger.json'
OAUTH_URL = 'https://login.eveonline.com/oauth/'
TOKEN_URL = OAUTH_URL + 'token'
AUTHORIZE_URL = OAUTH_URL + 'authorize'
WHOAMI_URL = OAUTH_URL + 'verify'
METHODS = ['get', 'post', 'put', 'delete']
OPERATION_ID_KEY = 'operationId'
VAR_REPLACE_REGEX = r'{(\w+)}'
def __init__(self, **kwargs: str) -> None:
self.cache = Cache()
self.spec = None
self.version = kwargs.get('version', 'latest')
self.session = requests.Session()
self.session.headers.update({
'User-Agent': kwargs.get('user_agent', ''),
'Accept': 'application/json'
})
self.client_id = kwargs.get('client_id')
self.client_secret = kwargs.get('client_secret')
self.callback_url = kwargs.get('callback_url')
self.scope = kwargs.get('scope', '')
self.access_token = kwargs.get('access_token')
self.access_expiration = kwargs.get('access_expiration')
self.refresh_token = kwargs.get('refresh_token')
self._kwargs = kwargs
if not kwargs.get('no_update_token', False):
self._try_refresh_access_token()
self._update_access_token_header()
def copy(self) -> 'Preston':
"""Creates a copy of this Preston object.
The returned instance is not connected to this, so you can set
whichever headers or other data you want without impacting this instance.
The configuration of the returned instance will match the (original)
configuration of this instance - the kwargs are reused.
Args:
None
Returns:
new Preston instance
"""
return Preston(**self._kwargs)
def _get_access_from_refresh(self) -> Tuple[str, float]:
"""Uses the stored refresh token to get a new access token.
This method assumes that the refresh token exists.
Args:
None
Returns:
new access token and expiration time (from now)
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
response_data = r.json()
return (response_data['access_token'], response_data['expires_in'])
def _get_authorization_headers(self) -> dict:
"""Constructs and returns the Authorization header for the client app.
Args:
None
Returns:
header dict for communicating with the authorization endpoints
"""
auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')
auth = auth.replace('\n', '').replace(' ', '')
auth = 'Basic {}'.format(auth)
headers = {'Authorization': auth}
return headers
def _try_refresh_access_token(self) -> None:
"""Attempts to get a new access token using the refresh token, if needed.
If the access token is expired and this instance has a stored refresh token,
then the refresh token is in the API call to get a new access token. If
successful, this instance is modified in-place with that new access token.
Args:
None
Returns:
None
"""
if self.refresh_token:
if not self.access_token or self._is_access_token_expired():
self.access_token, self.access_expiration = self._get_access_from_refresh()
self.access_expiration = time.time() + self.access_expiration
def _is_access_token_expired(self) -> bool:
"""Returns true if the stored access token has expired.
Args:
None
Returns:
True if the access token is expired
"""
return time.time() > self.access_expiration
def get_authorize_url(self) -> str:
"""Constructs and returns the authorization URL.
This is the URL that a user will have to navigate to in their browser
and complete the login and authorization flow. Upon completion, they
will be redirected to your app's callback URL.
Args:
None
Returns:
URL
"""
return (
f'{self.AUTHORIZE_URL}?response_type=code&redirect_uri={self.callback_url}'
f'&client_id={self.client_id}&scope={self.scope}'
)
def authenticate(self, code: str) -> 'Preston':
"""Authenticates using the code from the EVE SSO.
A new Preston object is returned; this object is not modified.
The intended usage is:
auth = preston.authenticate('some_code_here')
Args:
code: SSO code
Returns:
new Preston, authenticated
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'authorization_code',
'code': code
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
if not r.status_code == 200:
raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')
new_kwargs = dict(self._kwargs)
response_data = r.json()
new_kwargs['access_token'] = response_data['access_token']
new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
new_kwargs['refresh_token'] = response_data['refresh_token']
return Preston(**new_kwargs)
def _update_access_token_header(self) -> None:
"""Updates the requests session with the access token header.
This method does nothing if this instance does not have a
stored access token.
Args:
None
Returns:
None
"""
if self.access_token:
self.session.headers.update({
'Authorization': f'Bearer {self.access_token}'
})
def _get_spec(self) -> dict:
"""Fetches the OpenAPI spec from the server.
If the spec has already been fetched, the cached version is returned instead.
ArgS:
None
Returns:
OpenAPI spec data
"""
if self.spec:
return self.spec
self.spec = requests.get(self.SPEC_URL.format(self.version)).json()
return self.spec
def _get_path_for_op_id(self, id: str) -> Optional[str]:
"""Searches the spec for a path matching the operation id.
Args:
id: operation id
Returns:
path to the endpoint, or None if not found
"""
for path_key, path_value in self._get_spec()['paths'].items():
for method in self.METHODS:
if method in path_value:
if self.OPERATION_ID_KEY in path_value[method]:
if path_value[method][self.OPERATION_ID_KEY] == id:
return path_key
return None
def _insert_vars(self, path: str, data: dict) -> str:
"""Inserts variables into the ESI URL path.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
path with variables filled
"""
data = data.copy()
while True:
match = re.search(self.VAR_REPLACE_REGEX, path)
if not match:
return path
replace_from = match.group(0)
replace_with = str(data.get(match.group(1)))
path = path.replace(replace_from, replace_with)
def whoami(self) -> dict:
"""Returns the basic information about the authenticated character.
Obviously doesn't do anything if this Preston instance is not
authenticated, so it returns an empty dict.
Args:
None
Returns:
character info if authenticated, otherwise an empty dict
"""
if not self.access_token:
return {}
self._try_refresh_access_token()
return self.session.get(self.WHOAMI_URL).json()
def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:
"""Queries the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
ESI data
"""
path = self._insert_vars(path, data)
path = self.BASE_URL + path
data = self.cache.check(path)
if data:
return data
self._try_refresh_access_token()
r = self.session.get(path)
self.cache.set(r)
return r.json()
def get_op(self, id: str, **kwargs: str) -> dict:
"""Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.get_path(path, kwargs)
def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by looking up an operation id.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.post_path(path, path_data, post_data)
|
Celeo/Preston
|
preston/preston.py
|
Preston.post_op
|
python
|
def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
path = self._get_path_for_op_id(id)
return self.post_path(path, path_data, post_data)
|
Modifies the ESI by looking up an operation id.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L350-L362
|
[
"def _get_path_for_op_id(self, id: str) -> Optional[str]:\n \"\"\"Searches the spec for a path matching the operation id.\n\n Args:\n id: operation id\n\n Returns:\n path to the endpoint, or None if not found\n \"\"\"\n for path_key, path_value in self._get_spec()['paths'].items():\n for method in self.METHODS:\n if method in path_value:\n if self.OPERATION_ID_KEY in path_value[method]:\n if path_value[method][self.OPERATION_ID_KEY] == id:\n return path_key\n return None\n",
"def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:\n \"\"\"Modifies the ESI by an endpoint URL.\n\n This method is not marked \"private\" as it _can_ be used\n by consuming code, but it's probably easier to call the\n `get_op` method instead.\n\n Args:\n path: raw ESI URL path\n path_data: data to format the path with (can be None)\n post_data: data to send to ESI\n\n Returns:\n ESI data\n \"\"\"\n path = self._insert_vars(path, path_data or {})\n path = self.BASE_URL + path\n self._try_refresh_access_token()\n return self.session.post(path, json=post_data).json()\n"
] |
class Preston:
"""Preston class.
This class is used to interface with the EVE Online "ESI" API.
The __init__ method only **kwargs instead of a specific
listing of arguments; here's the list of useful key-values:
version version of the spec to load
user_agent user-agent to use
client_id app's client id
client_secret app's client secret
callback_url app's callback url
scope app's scope(s)
access_token if supplied along with access_expiration,
Preston will make authenticated calls to ESI
access_expiration see above
refresh_token if supplied, Preston will use it to get new
access tokens; can be supplied with or without
access_token and access_expiration
Args:
kwargs: various configuration options
"""
BASE_URL = 'https://esi.tech.ccp.is'
SPEC_URL = BASE_URL + '/_{}/swagger.json'
OAUTH_URL = 'https://login.eveonline.com/oauth/'
TOKEN_URL = OAUTH_URL + 'token'
AUTHORIZE_URL = OAUTH_URL + 'authorize'
WHOAMI_URL = OAUTH_URL + 'verify'
METHODS = ['get', 'post', 'put', 'delete']
OPERATION_ID_KEY = 'operationId'
VAR_REPLACE_REGEX = r'{(\w+)}'
def __init__(self, **kwargs: str) -> None:
self.cache = Cache()
self.spec = None
self.version = kwargs.get('version', 'latest')
self.session = requests.Session()
self.session.headers.update({
'User-Agent': kwargs.get('user_agent', ''),
'Accept': 'application/json'
})
self.client_id = kwargs.get('client_id')
self.client_secret = kwargs.get('client_secret')
self.callback_url = kwargs.get('callback_url')
self.scope = kwargs.get('scope', '')
self.access_token = kwargs.get('access_token')
self.access_expiration = kwargs.get('access_expiration')
self.refresh_token = kwargs.get('refresh_token')
self._kwargs = kwargs
if not kwargs.get('no_update_token', False):
self._try_refresh_access_token()
self._update_access_token_header()
def copy(self) -> 'Preston':
"""Creates a copy of this Preston object.
The returned instance is not connected to this, so you can set
whichever headers or other data you want without impacting this instance.
The configuration of the returned instance will match the (original)
configuration of this instance - the kwargs are reused.
Args:
None
Returns:
new Preston instance
"""
return Preston(**self._kwargs)
def _get_access_from_refresh(self) -> Tuple[str, float]:
"""Uses the stored refresh token to get a new access token.
This method assumes that the refresh token exists.
Args:
None
Returns:
new access token and expiration time (from now)
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
response_data = r.json()
return (response_data['access_token'], response_data['expires_in'])
def _get_authorization_headers(self) -> dict:
"""Constructs and returns the Authorization header for the client app.
Args:
None
Returns:
header dict for communicating with the authorization endpoints
"""
auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')
auth = auth.replace('\n', '').replace(' ', '')
auth = 'Basic {}'.format(auth)
headers = {'Authorization': auth}
return headers
def _try_refresh_access_token(self) -> None:
"""Attempts to get a new access token using the refresh token, if needed.
If the access token is expired and this instance has a stored refresh token,
then the refresh token is in the API call to get a new access token. If
successful, this instance is modified in-place with that new access token.
Args:
None
Returns:
None
"""
if self.refresh_token:
if not self.access_token or self._is_access_token_expired():
self.access_token, self.access_expiration = self._get_access_from_refresh()
self.access_expiration = time.time() + self.access_expiration
def _is_access_token_expired(self) -> bool:
"""Returns true if the stored access token has expired.
Args:
None
Returns:
True if the access token is expired
"""
return time.time() > self.access_expiration
def get_authorize_url(self) -> str:
"""Constructs and returns the authorization URL.
This is the URL that a user will have to navigate to in their browser
and complete the login and authorization flow. Upon completion, they
will be redirected to your app's callback URL.
Args:
None
Returns:
URL
"""
return (
f'{self.AUTHORIZE_URL}?response_type=code&redirect_uri={self.callback_url}'
f'&client_id={self.client_id}&scope={self.scope}'
)
def authenticate(self, code: str) -> 'Preston':
"""Authenticates using the code from the EVE SSO.
A new Preston object is returned; this object is not modified.
The intended usage is:
auth = preston.authenticate('some_code_here')
Args:
code: SSO code
Returns:
new Preston, authenticated
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'authorization_code',
'code': code
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
if not r.status_code == 200:
raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')
new_kwargs = dict(self._kwargs)
response_data = r.json()
new_kwargs['access_token'] = response_data['access_token']
new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
new_kwargs['refresh_token'] = response_data['refresh_token']
return Preston(**new_kwargs)
def _update_access_token_header(self) -> None:
"""Updates the requests session with the access token header.
This method does nothing if this instance does not have a
stored access token.
Args:
None
Returns:
None
"""
if self.access_token:
self.session.headers.update({
'Authorization': f'Bearer {self.access_token}'
})
def _get_spec(self) -> dict:
"""Fetches the OpenAPI spec from the server.
If the spec has already been fetched, the cached version is returned instead.
ArgS:
None
Returns:
OpenAPI spec data
"""
if self.spec:
return self.spec
self.spec = requests.get(self.SPEC_URL.format(self.version)).json()
return self.spec
def _get_path_for_op_id(self, id: str) -> Optional[str]:
"""Searches the spec for a path matching the operation id.
Args:
id: operation id
Returns:
path to the endpoint, or None if not found
"""
for path_key, path_value in self._get_spec()['paths'].items():
for method in self.METHODS:
if method in path_value:
if self.OPERATION_ID_KEY in path_value[method]:
if path_value[method][self.OPERATION_ID_KEY] == id:
return path_key
return None
def _insert_vars(self, path: str, data: dict) -> str:
"""Inserts variables into the ESI URL path.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
path with variables filled
"""
data = data.copy()
while True:
match = re.search(self.VAR_REPLACE_REGEX, path)
if not match:
return path
replace_from = match.group(0)
replace_with = str(data.get(match.group(1)))
path = path.replace(replace_from, replace_with)
def whoami(self) -> dict:
"""Returns the basic information about the authenticated character.
Obviously doesn't do anything if this Preston instance is not
authenticated, so it returns an empty dict.
Args:
None
Returns:
character info if authenticated, otherwise an empty dict
"""
if not self.access_token:
return {}
self._try_refresh_access_token()
return self.session.get(self.WHOAMI_URL).json()
def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:
"""Queries the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
ESI data
"""
path = self._insert_vars(path, data)
path = self.BASE_URL + path
data = self.cache.check(path)
if data:
return data
self._try_refresh_access_token()
r = self.session.get(path)
self.cache.set(r)
return r.json()
def get_op(self, id: str, **kwargs: str) -> dict:
"""Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.get_path(path, kwargs)
def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._insert_vars(path, path_data or {})
path = self.BASE_URL + path
self._try_refresh_access_token()
return self.session.post(path, json=post_data).json()
|
Celeo/Preston
|
preston/cache.py
|
Cache._get_expiration
|
python
|
def _get_expiration(self, headers: dict) -> int:
expiration_str = headers.get('expires')
if not expiration_str:
return 0
expiration = datetime.strptime(expiration_str, '%a, %d %b %Y %H:%M:%S %Z')
delta = (expiration - datetime.utcnow()).total_seconds()
return math.ceil(abs(delta))
|
Gets the expiration time of the data from the response headers.
Args:
headers: dictionary of headers from ESI
Returns:
value of seconds from now the data expires
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/cache.py#L23-L37
| null |
class Cache:
    """In-memory response cache for ESI calls.

    The cache is designed to respect the caching rules of ESI so that
    a page is not requested more often than it is updated by the server.
    """

    def __init__(self):
        """Create an empty cache.

        Args:
            None

        Returns:
            None
        """
        # Maps request URL -> SavedEndpoint(data, expires_after).
        self.data: dict = {}

    def set(self, response: 'requests.Response') -> None:
        """Adds a response to the cache.

        Args:
            response: response from ESI

        Returns:
            None
        """
        # NOTE(review): _get_expiration is defined on the full class
        # elsewhere in this project; it derives a TTL (seconds) from the
        # response headers — confirm against the complete source.
        self.data[response.url] = SavedEndpoint(
            response.json(),
            self._get_expiration(response.headers)
        )

    def _check_expiration(self, url: str, data: 'SavedEndpoint') -> Optional['SavedEndpoint']:
        """Checks the expiration time for data for a url.

        If the data has expired, it is deleted from the cache.

        Args:
            url: url to check
            data: page of data for that url

        Returns:
            the passed data, or None if it expired (annotation fixed to
            Optional to reflect the None case)
        """
        if data.expires_after < time.time():
            del self.data[url]
            return None
        return data

    def check(self, url: str) -> Optional[dict]:
        """Check if data for a url has expired.

        Data is not fetched again if it has expired.

        Args:
            url: url to check expiration on

        Returns:
            value of the data, possibly None
        """
        data = self.data.get(url)
        if data:
            data = self._check_expiration(url, data)
        return data.data if data else None

    def __len__(self) -> int:
        """Returns the number of items in the stored data.

        More of a debugging tool, since getting the number of dictionary
        keys isn't a good indicator of how much data is actually here.

        Args:
            None

        Returns:
            value of the number of keys in the data
        """
        # len(dict) is the idiomatic (and equivalent) form of len(dict.keys()).
        return len(self.data)
|
Celeo/Preston
|
preston/cache.py
|
Cache.set
|
python
|
def set(self, response: 'requests.Response') -> None:
self.data[response.url] = SavedEndpoint(
response.json(),
self._get_expiration(response.headers)
)
|
Adds a response to the cache.
Args:
response: response from ESI
Returns:
None
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/cache.py#L39-L51
|
[
"def _get_expiration(self, headers: dict) -> int:\n \"\"\"Gets the expiration time of the data from the response headers.\n\n Args:\n headers: dictionary of headers from ESI\n\n Returns:\n value of seconds from now the data expires\n \"\"\"\n expiration_str = headers.get('expires')\n if not expiration_str:\n return 0\n expiration = datetime.strptime(expiration_str, '%a, %d %b %Y %H:%M:%S %Z')\n delta = (expiration - datetime.utcnow()).total_seconds()\n return math.ceil(abs(delta))\n"
] |
class Cache:
    """In-memory response cache for ESI calls.

    The cache is designed to respect the caching rules of ESI so that
    a page is not requested more often than it is updated by the server.
    """

    def __init__(self):
        """Create an empty cache.

        Args:
            None

        Returns:
            None
        """
        # Maps request URL -> SavedEndpoint(data, expires_after).
        self.data: dict = {}

    def _get_expiration(self, headers: dict) -> int:
        """Gets the expiration time of the data from the response headers.

        Args:
            headers: dictionary of headers from ESI

        Returns:
            number of seconds from now until the data expires; 0 if the
            'expires' header is missing or already in the past
        """
        expiration_str = headers.get('expires')
        if not expiration_str:
            return 0
        expiration = datetime.strptime(expiration_str, '%a, %d %b %Y %H:%M:%S %Z')
        delta = (expiration - datetime.utcnow()).total_seconds()
        # Bug fix: the original returned math.ceil(abs(delta)), which turned
        # an already-expired timestamp into a large positive TTL and so kept
        # stale pages cached. Clamp past timestamps to 0 instead.
        return max(0, math.ceil(delta))

    def _check_expiration(self, url: str, data: 'SavedEndpoint') -> Optional['SavedEndpoint']:
        """Checks the expiration time for data for a url.

        If the data has expired, it is deleted from the cache.

        Args:
            url: url to check
            data: page of data for that url

        Returns:
            the passed data, or None if it expired (annotation fixed to
            Optional to reflect the None case)
        """
        if data.expires_after < time.time():
            del self.data[url]
            return None
        return data

    def check(self, url: str) -> Optional[dict]:
        """Check if data for a url has expired.

        Data is not fetched again if it has expired.

        Args:
            url: url to check expiration on

        Returns:
            value of the data, possibly None
        """
        data = self.data.get(url)
        if data:
            data = self._check_expiration(url, data)
        return data.data if data else None

    def __len__(self) -> int:
        """Returns the number of items in the stored data.

        More of a debugging tool, since getting the number of dictionary
        keys isn't a good indicator of how much data is actually here.

        Args:
            None

        Returns:
            value of the number of keys in the data
        """
        # len(dict) is the idiomatic (and equivalent) form of len(dict.keys()).
        return len(self.data)
|
Celeo/Preston
|
preston/cache.py
|
Cache._check_expiration
|
python
|
def _check_expiration(self, url: str, data: 'SavedEndpoint') -> 'SavedEndpoint':
if data.expires_after < time.time():
del self.data[url]
data = None
return data
|
Checks the expiration time for data for a url.
If the data has expired, it is deleted from the cache.
Args:
url: url to check
data: page of data for that url
Returns:
value of either the passed data or None if it expired
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/cache.py#L53-L68
| null |
class Cache:
def __init__(self):
"""Cache class.
The cache is desgined to respect the caching rules of ESI as to
not request a page more often than it is updated by the server.
Args:
None
Returns:
None
"""
self.data: dict = {}
def _get_expiration(self, headers: dict) -> int:
"""Gets the expiration time of the data from the response headers.
Args:
headers: dictionary of headers from ESI
Returns:
value of seconds from now the data expires
"""
expiration_str = headers.get('expires')
if not expiration_str:
return 0
expiration = datetime.strptime(expiration_str, '%a, %d %b %Y %H:%M:%S %Z')
delta = (expiration - datetime.utcnow()).total_seconds()
return math.ceil(abs(delta))
def set(self, response: 'requests.Response') -> None:
"""Adds a response to the cache.
Args:
response: response from ESI
Returns:
None
"""
self.data[response.url] = SavedEndpoint(
response.json(),
self._get_expiration(response.headers)
)
def check(self, url: str) -> Optional[dict]:
"""Check if data for a url has expired.
Data is not fetched again if it has expired.
Args:
url: url to check expiration on
Returns:
value of the data, possibly None
"""
data = self.data.get(url)
if data:
data = self._check_expiration(url, data)
return data.data if data else None
def __len__(self) -> int:
"""Returns the number of items in the stored data.
More of a debugging tool, since getting the number of dictionary keys
isn't a good indicator of how much data is actually here.
Args:
None
Returns:
value of the number of keys in the data
"""
return len(self.data.keys())
|
Celeo/Preston
|
preston/cache.py
|
Cache.check
|
python
|
def check(self, url: str) -> Optional[dict]:
data = self.data.get(url)
if data:
data = self._check_expiration(url, data)
return data.data if data else None
|
Check if data for a url has expired.
Data is not fetched again if it has expired.
Args:
url: url to check expiration on
Returns:
value of the data, possibly None
|
train
|
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/cache.py#L70-L84
|
[
"def _check_expiration(self, url: str, data: 'SavedEndpoint') -> 'SavedEndpoint':\n \"\"\"Checks the expiration time for data for a url.\n\n If the data has expired, it is deleted from the cache.\n\n Args:\n url: url to check\n data: page of data for that url\n\n Returns:\n value of either the passed data or None if it expired\n \"\"\"\n if data.expires_after < time.time():\n del self.data[url]\n data = None\n return data\n"
] |
class Cache:
def __init__(self):
"""Cache class.
The cache is desgined to respect the caching rules of ESI as to
not request a page more often than it is updated by the server.
Args:
None
Returns:
None
"""
self.data: dict = {}
def _get_expiration(self, headers: dict) -> int:
"""Gets the expiration time of the data from the response headers.
Args:
headers: dictionary of headers from ESI
Returns:
value of seconds from now the data expires
"""
expiration_str = headers.get('expires')
if not expiration_str:
return 0
expiration = datetime.strptime(expiration_str, '%a, %d %b %Y %H:%M:%S %Z')
delta = (expiration - datetime.utcnow()).total_seconds()
return math.ceil(abs(delta))
def set(self, response: 'requests.Response') -> None:
"""Adds a response to the cache.
Args:
response: response from ESI
Returns:
None
"""
self.data[response.url] = SavedEndpoint(
response.json(),
self._get_expiration(response.headers)
)
def _check_expiration(self, url: str, data: 'SavedEndpoint') -> 'SavedEndpoint':
"""Checks the expiration time for data for a url.
If the data has expired, it is deleted from the cache.
Args:
url: url to check
data: page of data for that url
Returns:
value of either the passed data or None if it expired
"""
if data.expires_after < time.time():
del self.data[url]
data = None
return data
def __len__(self) -> int:
"""Returns the number of items in the stored data.
More of a debugging tool, since getting the number of dictionary keys
isn't a good indicator of how much data is actually here.
Args:
None
Returns:
value of the number of keys in the data
"""
return len(self.data.keys())
|
almarklein/pyelastix
|
pyelastix.py
|
_find_executables
|
python
|
def _find_executables(name):
exe_name = name + '.exe' * sys.platform.startswith('win')
env_path = os.environ.get(name.upper()+ '_PATH', '')
possible_locations = []
def add(*dirs):
for d in dirs:
if d and d not in possible_locations and os.path.isdir(d):
possible_locations.append(d)
# Get list of possible locations
add(env_path)
try:
add(os.path.dirname(os.path.abspath(__file__)))
except NameError: # __file__ may not exist
pass
add(os.path.dirname(sys.executable))
add(os.path.expanduser('~'))
# Platform specific possible locations
if sys.platform.startswith('win'):
add('c:\\program files', os.environ.get('PROGRAMFILES'),
'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
else:
possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])
def do_check_version(exe):
try:
return subprocess.check_output([exe, '--version']).decode().strip()
except Exception:
# print('not a good exe', exe)
return False
# If env path is the exe itself ...
if os.path.isfile(env_path):
ver = do_check_version(env_path)
if ver:
return env_path, ver
# First try to find obvious locations
for d in possible_locations:
for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
# Maybe the exe is on the PATH
ver = do_check_version(exe_name)
if ver:
return exe_name, ver
# Try harder
for d in possible_locations:
for sub in reversed(sorted(os.listdir(d))):
if sub.startswith(name):
exe = os.path.join(d, sub, exe_name)
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
return None, None
|
Try to find an executable.
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L76-L140
|
[
"def add(*dirs):\n for d in dirs:\n if d and d not in possible_locations and os.path.isdir(d):\n possible_locations.append(d)\n",
"def do_check_version(exe):\n try:\n return subprocess.check_output([exe, '--version']).decode().strip()\n except Exception:\n # print('not a good exe', exe)\n return False\n"
] |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30
# GetExitCodeProcess uses a special exit code to indicate that the process is
# still running.
_STILL_ACTIVE = 259
def _is_pid_running(pid):
"""Get whether a process with the given pid is currently running.
"""
if sys.platform.startswith("win"):
return _is_pid_running_on_windows(pid)
else:
return _is_pid_running_on_unix(pid)
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
import ctypes.wintypes
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
if handle == 0:
return False
# If the process exited recently, a pid may still exist for the handle.
# So, check if we can get the exit code.
exit_code = ctypes.wintypes.DWORD()
is_running = (
kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
kernel32.CloseHandle(handle)
# See if we couldn't get the exit code or the exit code indicates that the
# process is still running.
return is_running or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executablews
def _find_executables(name):
""" Try to find an executable.
"""
exe_name = name + '.exe' * sys.platform.startswith('win')
env_path = os.environ.get(name.upper()+ '_PATH', '')
possible_locations = []
def add(*dirs):
for d in dirs:
if d and d not in possible_locations and os.path.isdir(d):
possible_locations.append(d)
# Get list of possible locations
add(env_path)
try:
add(os.path.dirname(os.path.abspath(__file__)))
except NameError: # __file__ may not exist
pass
add(os.path.dirname(sys.executable))
add(os.path.expanduser('~'))
# Platform specific possible locations
if sys.platform.startswith('win'):
add('c:\\program files', os.environ.get('PROGRAMFILES'),
'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
else:
possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])
def do_check_version(exe):
try:
return subprocess.check_output([exe, '--version']).decode().strip()
except Exception:
# print('not a good exe', exe)
return False
# If env path is the exe itself ...
if os.path.isfile(env_path):
ver = do_check_version(env_path)
if ver:
return env_path, ver
# First try to find obvious locations
for d in possible_locations:
for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
# Maybe the exe is on the PATH
ver = do_check_version(exe_name)
if ver:
return exe_name, ver
# Try harder
for d in possible_locations:
for sub in reversed(sorted(os.listdir(d))):
if sub.startswith(name):
exe = os.path.join(d, sub, exe_name)
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
return None, None
EXES = []
def get_elastix_exes():
""" Get the executables for elastix and transformix. Raises an error
if they cannot be found.
"""
if EXES:
if EXES[0]:
return EXES
else:
raise RuntimeError('No Elastix executable.')
# Find exe
elastix, ver = _find_executables('elastix')
if elastix:
base, ext = os.path.splitext(elastix)
base = os.path.dirname(base)
transformix = os.path.join(base, 'transformix' + ext)
EXES.extend([elastix, transformix])
print('Found %s in %r' % (ver, elastix))
return EXES
else:
raise RuntimeError('Could not find Elastix executable. Download '
'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
'looks for the exe in a series of common locations. '
'Set ELASTIX_PATH if necessary.')
# %% Code for maintaing the temp dirs
def _clear_dir(dirName):
""" Remove a directory and it contents. Ignore any failures.
"""
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
def get_tempdir():
""" Get the temporary directory where pyelastix stores its temporary
files. The directory is specific to the current process and the
calling thread. Generally, the user does not need this; directories
are automatically cleaned up. Though Elastix log files are also
written here.
"""
tempdir = os.path.join(tempfile.gettempdir(), 'pyelastix')
# Make sure it exists
if not os.path.isdir(tempdir):
os.makedirs(tempdir)
# Clean up all directories for which the process no longer exists
for fname in os.listdir(tempdir):
dirName = os.path.join(tempdir, fname)
# Check if is right kind of dir
if not (os.path.isdir(dirName) and fname.startswith('id_')):
continue
# Get pid and check if its running
try:
pid = int(fname.split('_')[1])
except Exception:
continue
if not _is_pid_running(pid):
_clear_dir(dirName)
# Select dir that included process and thread id
tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
else threading.currentThread())
dir = os.path.join(tempdir, 'id_%i_%i' % (os.getpid(), tid))
if not os.path.isdir(dir):
os.mkdir(dir)
return dir
def _clear_temp_dir():
""" Clear the temporary directory.
"""
tempdir = get_tempdir()
for fname in os.listdir(tempdir):
try:
os.remove( os.path.join(tempdir, fname) )
except Exception:
pass
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
""" Execute the given command in a subprocess and wait for it to finish.
A thread is run that prints output of the process if verbose is True.
"""
# Init flag
interrupted = False
# Create progress
if verbose > 0:
progress = Progress()
stdout = []
def poll_process(p):
while not interrupted:
msg = p.stdout.readline().decode()
if msg:
stdout.append(msg)
if 'error' in msg.lower():
print(msg.rstrip())
if verbose == 1:
progress.reset()
elif verbose > 1:
print(msg.rstrip())
elif verbose == 1:
progress.update(msg)
else:
break
time.sleep(0.01)
#print("thread exit")
# Start process that runs the command
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Keep reading stdout from it
# thread.start_new_thread(poll_process, (p,)) Python 2.x
my_thread = threading.Thread(target=poll_process, args=(p,))
my_thread.setDaemon(True)
my_thread.start()
# Wait here
try:
while p.poll() is None:
time.sleep(0.01)
except KeyboardInterrupt:
# Set flag
interrupted = True
# Kill subprocess
pid = p.pid
if hasattr(os,'kill'):
import signal
os.kill(pid, signal.SIGKILL)
elif sys.platform.startswith('win'):
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
kernel32.TerminateProcess(handle, 0)
#os.system("TASKKILL /PID " + str(pid) + " /F")
# All good?
if interrupted:
raise RuntimeError('Registration process interrupted by the user.')
if p.returncode:
stdout.append(p.stdout.read().decode())
print(''.join(stdout))
raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
def __init__(self):
self._level = 0
self.reset()
def update(self, s):
# Detect resolution
if s.startswith('Resolution:'):
self._level = self.get_int( s.split(':')[1] )
# Check if nr
if '\t' in s:
iter = self.get_int( s.split('\t',1)[0] )
if iter:
self.show_progress(iter)
def get_int(self, s):
nr = 0
try:
nr = int(s)
except Exception:
pass
return nr
def reset(self):
self._message = ''
print()
def show_progress(self, iter):
# Remove previous message
rem = '\b' * (len(self._message)+1)
# Create message, and print
self._message = 'resolution %i, iter %i' % (self._level, iter)
print(rem + self._message)
# %% The Elastix registration class
def register(im1, im2, params, exact_params=False, verbose=1):
""" register(im1, im2, params, exact_params=False, verbose=1)
Perform the registration of `im1` to `im2`, using the given
parameters. Returns `(im1_deformed, field)`, where `field` is a
tuple with arrays describing the deformation for each dimension
(x-y-z order, in world units).
Parameters:
* im1 (ndarray or file location):
The moving image (the one to deform).
* im2 (ndarray or file location):
The static (reference) image.
* params (dict or Parameters):
The parameters of the registration. Default parameters can be
obtained using the `get_default_params()` method. Note that any
parameter known to Elastix can be added to the parameter
struct, which enables tuning the registration in great detail.
See `get_default_params()` and the Elastix docs for more info.
* exact_params (bool):
If True, use the exact given parameters. If False (default)
will process the parameters, checking for incompatible
parameters, extending values to lists if a value needs to be
given for each dimension.
* verbose (int):
Verbosity level. If 0, will not print any progress. If 1, will
print the progress only. If 2, will print the full output
produced by the Elastix executable. Note that error messages
produced by Elastix will be printed regardless of the verbose
level.
If `im1` is a list of images, performs a groupwise registration.
In this case the resulting `field` is a list of fields, each
indicating the deformation to the "average" image.
"""
# Clear dir
tempdir = get_tempdir()
_clear_temp_dir()
# Reference image
refIm = im1
if isinstance(im1, (tuple,list)):
refIm = im1[0]
# Check parameters
if not exact_params:
params = _compile_params(params, refIm)
if isinstance(params, Parameters):
params = params.as_dict()
# Groupwise?
if im2 is None:
# todo: also allow using a constraint on the "last dimension"
if not isinstance(im1, (tuple,list)):
raise ValueError('im2 is None, but im1 is not a list.')
#
ims = im1
ndim = ims[0].ndim
# Create new image that is a combination of all images
N = len(ims)
new_shape = (N,) + ims[0].shape
im1 = np.zeros(new_shape, ims[0].dtype)
for i in range(N):
im1[i] = ims[i]
# Set parameters
#params['UseCyclicTransform'] = True # to be chosen by user
params['FixedImageDimension'] = im1.ndim
params['MovingImageDimension'] = im1.ndim
params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
params['Metric'] = 'VarianceOverLastDimensionMetric'
params['Transform'] = 'BSplineStackTransform'
params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
params['SampleLastDimensionRandomly'] = True
params['NumSamplesLastDimension'] = 5
params['SubtractMean'] = True
# No smoothing along that dimenson
pyramidsamples = []
for i in range(params['NumberOfResolutions']):
pyramidsamples.extend( [0]+[2**i]*ndim )
pyramidsamples.reverse()
params['ImagePyramidSchedule'] = pyramidsamples
# Get paths of input images
path_im1, path_im2 = _get_image_paths(im1, im2)
# Determine path of parameter file and write params
path_params = _write_parameter_file(params)
# Get path of trafo param file
path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')
# Register
if True:
# Compile command to execute
command = [get_elastix_exes()[0],
'-m', path_im1, '-f', path_im2,
'-out', tempdir, '-p', path_params]
if verbose:
print("Calling Elastix to register images ...")
_system3(command, verbose)
# Try and load result
try:
a = _read_image_data('result.0.mhd')
except IOError as why:
tmp = "An error occured during registration: " + str(why)
raise RuntimeError(tmp)
# Find deformation field
if True:
# Compile command to execute
command = [get_elastix_exes()[1],
'-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
_system3(command, verbose)
# Try and load result
try:
b = _read_image_data('deformationField.mhd')
except IOError as why:
tmp = "An error occured during transformation: " + str(why)
raise RuntimeError(tmp)
# Get deformation fields (for each image)
if im2 is None:
fields = [b[i] for i in range(b.shape[0])]
else:
fields = [b]
# Pull apart deformation fields in multiple images
for i in range(len(fields)):
field = fields[i]
if field.ndim == 2:
field = [field[:,d] for d in range(1)]
elif field.ndim == 3:
field = [field[:,:,d] for d in range(2)]
elif field.ndim == 4:
field = [field[:,:,:,d] for d in range(3)]
elif field.ndim == 5:
field = [field[:,:,:,:,d] for d in range(4)]
fields[i] = tuple(field)
if im2 is not None:
fields = fields[0] # For pairwise reg, return 1 field, not a list
# Clean and return
_clear_temp_dir()
return a, fields
def _write_image_data(im, id):
""" Write a numpy array to disk in the form of a .raw and .mhd file.
The id is the image sequence number (1 or 2). Returns the path of
the mhd file.
"""
im = im* (1.0/3000)
# Create text
lines = [ "ObjectType = Image",
"NDims = <ndim>",
"BinaryData = True",
"BinaryDataByteOrderMSB = False",
"CompressedData = False",
#"TransformMatrix = <transmatrix>",
"Offset = <origin>",
"CenterOfRotation = <centrot>",
"ElementSpacing = <sampling>",
"DimSize = <shape>",
"ElementType = <dtype>",
"ElementDataFile = <fname>",
"" ]
text = '\n'.join(lines)
# Determine file names
tempdir = get_tempdir()
fname_raw_ = 'im%i.raw' % id
fname_raw = os.path.join(tempdir, fname_raw_)
fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)
# Get shape, sampling and origin
shape = im.shape
if hasattr(im, 'sampling'): sampling = im.sampling
else: sampling = [1 for s in im.shape]
if hasattr(im, 'origin'): origin = im.origin
else: origin = [0 for s in im.shape]
# Make all shape stuff in x-y-z order and make it string
shape = ' '.join([str(s) for s in reversed(shape)])
sampling = ' '.join([str(s) for s in reversed(sampling)])
origin = ' '.join([str(s) for s in reversed(origin)])
# Get data type
dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
if dtype_itk is None:
raise ValueError('Cannot convert data of this type: '+ str(im.dtype))
# Set mhd text
text = text.replace('<fname>', fname_raw_)
text = text.replace('<ndim>', str(im.ndim))
text = text.replace('<shape>', shape)
text = text.replace('<sampling>', sampling)
text = text.replace('<origin>', origin)
text = text.replace('<dtype>', dtype_itk)
text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
if im.ndim==2:
text = text.replace('<transmatrix>', '1 0 0 1')
elif im.ndim==3:
text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
elif im.ndim==4:
pass # ???
# Write data file
f = open(fname_raw, 'wb')
try:
f.write(im.data)
finally:
f.close()
# Write mhd file
f = open(fname_mhd, 'wb')
try:
f.write(text.encode('utf-8'))
finally:
f.close()
# Done, return path of mhd file
return fname_mhd
def _read_image_data( mhd_file):
""" Read the resulting image data and return it as a numpy array.
"""
tempdir = get_tempdir()
# Load description from mhd file
fname = tempdir + '/' + mhd_file
des = open(fname, 'r').read()
# Get data filename and load raw data
match = re.findall('ElementDataFile = (.+?)\n', des)
fname = tempdir + '/' + match[0]
data = open(fname, 'rb').read()
# Determine dtype
match = re.findall('ElementType = (.+?)\n', des)
dtype_itk = match[0].upper().strip()
dtype = DTYPE_ITK2NP.get(dtype_itk, None)
if dtype is None:
raise RuntimeError('Unknown ElementType: ' + dtype_itk)
# Create numpy array
a = np.frombuffer(data, dtype=dtype)
# Determine shape, sampling and origin of the data
match = re.findall('DimSize = (.+?)\n', des)
shape = [int(i) for i in match[0].split(' ')]
#
match = re.findall('ElementSpacing = (.+?)\n', des)
sampling = [float(i) for i in match[0].split(' ')]
#
match = re.findall('Offset = (.+?)\n', des)
origin = [float(i) for i in match[0].split(' ')]
# Reverse shape stuff to make z-y-x order
shape = [s for s in reversed(shape)]
sampling = [s for s in reversed(sampling)]
origin = [s for s in reversed(origin)]
# Take vectors/colours into account
N = np.prod(shape)
if N != a.size:
extraDim = int( a.size / N )
shape = tuple(shape) + (extraDim,)
sampling = tuple(sampling) + (1.0,)
origin = tuple(origin) + (0,)
# Check shape
N = np.prod(shape)
if N != a.size:
raise RuntimeError('Cannot apply shape to data.')
else:
a.shape = shape
a = Image(a)
a.sampling = sampling
a.origin = origin
return a
class Image(np.ndarray):
def __new__(cls, array):
try:
ob = array.view(cls)
except AttributeError: # pragma: no cover
# Just return the original; no metadata on the array in Pypy!
return array
return ob
# %% Code related to parameters
class Parameters:
""" Struct object to represent the parameters for the Elastix
registration toolkit. Sets of parameters can be combined by
addition. (When adding `p1 + p2`, any parameters present in both
objects will take the value that the parameter has in `p2`.)
Use `get_default_params()` to get a Parameters struct with sensible
default values.
"""
def as_dict(self):
""" Returns the parameters as a dictionary.
"""
tmp = {}
tmp.update(self.__dict__)
return tmp
def __repr__(self):
return '<Parameters instance with %i parameters>' % len(self.__dict__)
def __str__(self):
# Get alignment value
c = 0
for key in self.__dict__:
c = max(c, len(key))
# How many chars left (to print on less than 80 lines)
charsLeft = 79 - (c+6)
s = '<%i parameters>\n' % len(self.__dict__)
for key in self.__dict__.keys():
valuestr = repr(self.__dict__[key])
if len(valuestr) > charsLeft:
valuestr = valuestr[:charsLeft-3] + '...'
s += key.rjust(c+4) + ": %s\n" % (valuestr)
return s
def __add__(self, other):
p = Parameters()
p.__dict__.update(self.__dict__)
p.__dict__.update(other.__dict__)
return p
def _get_fixed_params(im):
""" Parameters that the user has no influence on. Mostly chosen
bases on the input images.
"""
p = Parameters()
if not isinstance(im, np.ndarray):
return p
# Dimension of the inputs
p.FixedImageDimension = im.ndim
p.MovingImageDimension = im.ndim
# Always write result, so I can verify
p.WriteResultImage = True
# How to write the result
tmp = DTYPE_NP2ITK[im.dtype.name]
p.ResultImagePixelType = tmp.split('_')[-1].lower()
p.ResultImageFormat = "mhd"
# Done
return p
def get_advanced_params():
""" Get `Parameters` struct with parameters that most users do not
want to think about.
"""
p = Parameters()
# Internal format used during the registration process
p.FixedInternalImagePixelType = "float"
p.MovingInternalImagePixelType = "float"
# Image direction
p.UseDirectionCosines = True
# In almost all cases you'd want multi resolution
p.Registration = 'MultiResolutionRegistration'
# Pyramid options
# *RecursiveImagePyramid downsamples the images
# *SmoothingImagePyramid does not downsample
p.FixedImagePyramid = "FixedRecursiveImagePyramid"
p.MovingImagePyramid = "MovingRecursiveImagePyramid"
# Whether transforms are combined by composition or by addition.
# It does not influence the results very much.
p.HowToCombineTransforms = "Compose"
# For out of range pixels
p.DefaultPixelValue = 0
# Interpolator used during interpolation and its order
# 1 means linear interpolation, 3 means cubic.
p.Interpolator = "BSplineInterpolator"
p.BSplineInterpolationOrder = 1
# Interpolator used during interpolation of final level, and its order
p.ResampleInterpolator = "FinalBSplineInterpolator"
p.FinalBSplineInterpolationOrder = 3
# According to the manual, there is currently only one resampler
p.Resampler = "DefaultResampler"
# Done
return p
def get_default_params(type='BSPLINE'):
""" get_default_params(type='BSPLINE')
Get `Parameters` struct with parameters that users may want to tweak.
The given `type` specifies the type of allowed transform, and can
be 'RIGID', 'AFFINE', 'BSPLINE'.
For detail on what parameters are available and how they should be used,
we refer to the Elastix documentation. Here is a description of the
most common parameters:
* Transform (str):
Can be 'BSplineTransform', 'EulerTransform', or
'AffineTransform'. The transformation to apply. Chosen based on `type`.
* FinalGridSpacingInPhysicalUnits (int):
When using the BSplineTransform, the final spacing of the grid.
This controls the smoothness of the final deformation.
* AutomaticScalesEstimation (bool):
When using a rigid or affine transform. Scales the affine matrix
elements compared to the translations, to make sure they are in
the same range. In general, it's best to use automatic scales
estimation.
* AutomaticTransformInitialization (bool):
When using a rigid or affine transform. Automatically guess an
initial translation by aligning the geometric centers of the
fixed and moving.
* NumberOfResolutions (int):
Most registration algorithms adopt a multiresolution approach
to direct the solution towards a global optimum and to speed
up the process. This parameter specifies the number of scales
to apply the registration at. (default 4)
* MaximumNumberOfIterations (int):
Maximum number of iterations in each resolution level.
200-2000 works usually fine for nonrigid registration.
The more, the better, but the longer computation time.
This is an important parameter! (default 500).
"""
# Init
p = Parameters()
type = type.upper()
# ===== Metric to use =====
p.Metric = 'AdvancedMattesMutualInformation'
# Number of grey level bins in each resolution level,
# for the mutual information. 16 or 32 usually works fine.
# sets default value for NumberOf[Fixed/Moving]HistogramBins
p.NumberOfHistogramBins = 32
# Taking samples for mutual information
p.ImageSampler = 'RandomCoordinate'
p.NumberOfSpatialSamples = 2048
p.NewSamplesEveryIteration = True
# ====== Transform to use ======
# The number of levels in the image pyramid
p.NumberOfResolutions = 4
if type in ['B', 'BSPLINE', 'B-SPLINE']:
# Bspline transform
p.Transform = 'BSplineTransform'
# The final grid spacing (at the smallest level)
p.FinalGridSpacingInPhysicalUnits = 16
if type in ['RIGID', 'EULER', 'AFFINE']:
# Affine or Euler transform
if type in ['RIGID', 'EULER']:
p.Transform = 'EulerTransform'
else:
p.Transform = 'AffineTransform'
# Scales the affine matrix elements compared to the translations,
# to make sure they are in the same range. In general, it's best to
# use automatic scales estimation.
p.AutomaticScalesEstimation = True
# Automatically guess an initial translation by aligning the
# geometric centers of the fixed and moving.
p.AutomaticTransformInitialization = True
# ===== Optimizer to use =====
p.Optimizer = 'AdaptiveStochasticGradientDescent'
# Maximum number of iterations in each resolution level:
# 200-2000 works usually fine for nonrigid registration.
# The more, the better, but the longer computation time.
# This is an important parameter!
p.MaximumNumberOfIterations = 500
# The step size of the optimizer, in mm. By default the voxel size is used.
# which usually works well. In case of unusual high-resolution images
# (eg histology) it is necessary to increase this value a bit, to the size
# of the "smallest visible structure" in the image:
#p.MaximumStepLength = 1.0 Default uses voxel spaceing
# Another optional parameter for the AdaptiveStochasticGradientDescent
#p.SigmoidInitialTime = 4.0
# ===== Also interesting parameters =====
#p.FinalGridSpacingInVoxels = 16
#p.GridSpacingSchedule = [4.0, 4.0, 2.0, 1.0]
#p.ImagePyramidSchedule = [8 8 4 4 2 2 1 1]
#p.ErodeMask = "false"
# Done
return p
def _compile_params(params, im1):
""" Compile the params dictionary:
* Combine parameters from different sources
* Perform checks to prevent non-compatible parameters
* Extend parameters that need a list with one element per dimension
"""
# Compile parameters
p = _get_fixed_params(im1) + get_advanced_params()
p = p + params
params = p.as_dict()
# Check parameter dimensions
if isinstance(im1, np.ndarray):
lt = (list, tuple)
for key in [ 'FinalGridSpacingInPhysicalUnits',
'FinalGridSpacingInVoxels' ]:
if key in params.keys() and not isinstance(params[key], lt):
params[key] = [params[key]] * im1.ndim
# Check parameter removal
if 'FinalGridSpacingInVoxels' in params:
if 'FinalGridSpacingInPhysicalUnits' in params:
params.pop('FinalGridSpacingInPhysicalUnits')
# Done
return params
def _write_parameter_file(params):
    """ Write the parameter file in the format that elastix likes.

    ``params`` maps parameter names to bool, int, float, str, or a
    list/tuple of those. Returns the path of the written ``params.txt``
    in the pyelastix temp directory.
    """
    # Get path
    path = os.path.join(get_tempdir(), 'params.txt')

    # Define helper function
    def valToStr(val):
        # BUGFIX: test bool *before* int. The previous check
        # ``val in [True, False]`` also matched the ints 0 and 1
        # (0 == False), so e.g. DefaultPixelValue = 0 was written as
        # "false" instead of 0.
        if isinstance(val, bool):
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            tmp = str(val)
            # Elastix wants an explicit decimal point, but do not
            # mangle exponent notation ('1e-05' must not become
            # '1e-05.0').
            if '.' not in tmp and 'e' not in tmp and 'E' not in tmp:
                tmp += '.0'
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val
        # Previously an unsupported type returned None and crashed later
        # during string joining; fail early with a clear message instead.
        raise ValueError('Cannot represent parameter value %r' % (val,))

    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # Create line and add
        line = '(%s %s)' % (key, val_)
        text += line + '\n'

    # Write text
    with open(path, 'wb') as f:
        f.write(text.encode('utf-8'))
    # Done
    return path
|
almarklein/pyelastix
|
pyelastix.py
|
get_elastix_exes
|
python
|
def get_elastix_exes():
if EXES:
if EXES[0]:
return EXES
else:
raise RuntimeError('No Elastix executable.')
# Find exe
elastix, ver = _find_executables('elastix')
if elastix:
base, ext = os.path.splitext(elastix)
base = os.path.dirname(base)
transformix = os.path.join(base, 'transformix' + ext)
EXES.extend([elastix, transformix])
print('Found %s in %r' % (ver, elastix))
return EXES
else:
raise RuntimeError('Could not find Elastix executable. Download '
'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
'looks for the exe in a series of common locations. '
'Set ELASTIX_PATH if necessary.')
|
Get the executables for elastix and transformix. Raises an error
if they cannot be found.
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L145-L168
|
[
"def _find_executables(name):\n \"\"\" Try to find an executable.\n \"\"\"\n exe_name = name + '.exe' * sys.platform.startswith('win')\n env_path = os.environ.get(name.upper()+ '_PATH', '')\n\n possible_locations = []\n def add(*dirs):\n for d in dirs:\n if d and d not in possible_locations and os.path.isdir(d):\n possible_locations.append(d)\n\n # Get list of possible locations\n add(env_path)\n try:\n add(os.path.dirname(os.path.abspath(__file__)))\n except NameError: # __file__ may not exist\n pass\n add(os.path.dirname(sys.executable))\n add(os.path.expanduser('~'))\n\n # Platform specific possible locations\n if sys.platform.startswith('win'):\n add('c:\\\\program files', os.environ.get('PROGRAMFILES'),\n 'c:\\\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))\n else:\n possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])\n\n def do_check_version(exe):\n try:\n return subprocess.check_output([exe, '--version']).decode().strip()\n except Exception:\n # print('not a good exe', exe)\n return False\n\n # If env path is the exe itself ...\n if os.path.isfile(env_path):\n ver = do_check_version(env_path)\n if ver:\n return env_path, ver\n\n # First try to find obvious locations\n for d in possible_locations:\n for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:\n if os.path.isfile(exe):\n ver = do_check_version(exe)\n if ver:\n return exe, ver\n\n # Maybe the exe is on the PATH\n ver = do_check_version(exe_name)\n if ver:\n return exe_name, ver\n\n # Try harder\n for d in possible_locations:\n for sub in reversed(sorted(os.listdir(d))):\n if sub.startswith(name):\n exe = os.path.join(d, sub, exe_name)\n if os.path.isfile(exe):\n ver = do_check_version(exe)\n if ver:\n return exe, ver\n\n return None, None\n"
] |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30
# GetExitCodeProcess uses a special exit code to indicate that the process is
# still running.
_STILL_ACTIVE = 259
def _is_pid_running(pid):
    """Get whether a process with the given pid is currently running.
    """
    # Dispatch to the platform-specific implementation.
    if not sys.platform.startswith("win"):
        return _is_pid_running_on_unix(pid)
    return _is_pid_running_on_windows(pid)
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
    import ctypes.wintypes

    kernel32 = ctypes.windll.kernel32
    # Access flag 1 (PROCESS_TERMINATE) is enough to query the exit code;
    # OpenProcess returns 0 when no such process exists.
    handle = kernel32.OpenProcess(1, 0, pid)
    if handle == 0:
        return False

    # If the process exited recently, a pid may still exist for the handle.
    # So, check if we can get the exit code.
    exit_code = ctypes.wintypes.DWORD()
    is_running = (
        kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
    kernel32.CloseHandle(handle)

    # See if we couldn't get the exit code or the exit code indicates that the
    # process is still running (special value _STILL_ACTIVE == 259).
    return is_running or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executables
def _find_executables(name):
    """ Try to find an executable.

    Returns ``(path, version_string)`` on success, ``(None, None)``
    otherwise. Candidate directories are probed in order; each candidate
    is validated by running ``<exe> --version``.
    """
    exe_name = name + '.exe' * sys.platform.startswith('win')
    # E.g. ELASTIX_PATH may point either at a directory or at the exe itself.
    env_path = os.environ.get(name.upper() + '_PATH', '')

    possible_locations = []

    def add(*dirs):
        # Append existing, not-yet-seen directories only.
        for d in dirs:
            if d and d not in possible_locations and os.path.isdir(d):
                possible_locations.append(d)

    # Get list of possible locations
    add(env_path)
    try:
        add(os.path.dirname(os.path.abspath(__file__)))
    except NameError:  # __file__ may not exist
        pass
    add(os.path.dirname(sys.executable))
    add(os.path.expanduser('~'))

    # Platform specific possible locations
    if sys.platform.startswith('win'):
        add('c:\\program files', os.environ.get('PROGRAMFILES'),
            'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
    else:
        possible_locations.extend(['/usr/bin', '/usr/local/bin', '/opt/local/bin'])

    def do_check_version(exe):
        # A candidate counts as valid only if it answers '--version'.
        try:
            return subprocess.check_output([exe, '--version']).decode().strip()
        except Exception:
            # print('not a good exe', exe)
            return False

    # If env path is the exe itself ...
    if os.path.isfile(env_path):
        ver = do_check_version(env_path)
        if ver:
            return env_path, ver

    # First try to find obvious locations: <dir>/<exe> and <dir>/<name>/<exe>
    for d in possible_locations:
        for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
            if os.path.isfile(exe):
                ver = do_check_version(exe)
                if ver:
                    return exe, ver

    # Maybe the exe is on the PATH
    ver = do_check_version(exe_name)
    if ver:
        return exe_name, ver

    # Try harder: scan subdirectories whose name starts with the tool name
    # (e.g. 'elastix-4.9'); reversed sort prefers the newest version.
    for d in possible_locations:
        for sub in reversed(sorted(os.listdir(d))):
            if sub.startswith(name):
                exe = os.path.join(d, sub, exe_name)
                if os.path.isfile(exe):
                    ver = do_check_version(exe)
                    if ver:
                        return exe, ver

    return None, None
EXES = []
def get_elastix_exes():
    """ Get the executables for elastix and transformix. Raises an error
    if they cannot be found.

    Returns the module-level EXES list: [elastix_path, transformix_path].
    The search result is cached in EXES across calls.
    """
    if EXES:
        if EXES[0]:
            return EXES
        else:
            # A falsy first entry marks a previously failed search.
            raise RuntimeError('No Elastix executable.')
    # Find exe
    elastix, ver = _find_executables('elastix')
    if elastix:
        # Assume transformix lives next to elastix, with the same extension.
        base, ext = os.path.splitext(elastix)
        base = os.path.dirname(base)
        transformix = os.path.join(base, 'transformix' + ext)
        EXES.extend([elastix, transformix])
        print('Found %s in %r' % (ver, elastix))
        return EXES
    else:
        raise RuntimeError('Could not find Elastix executable. Download '
                           'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
                           'looks for the exe in a series of common locations. '
                           'Set ELASTIX_PATH if necessary.')
# %% Code for maintaing the temp dirs
def _clear_dir(dirName):
""" Remove a directory and it contents. Ignore any failures.
"""
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
def get_tempdir():
    """ Get the temporary directory where pyelastix stores its temporary
    files. The directory is specific to the current process and the
    calling thread. Generally, the user does not need this; directories
    are automatically cleaned up. Though Elastix log files are also
    written here.
    """
    base = os.path.join(tempfile.gettempdir(), 'pyelastix')
    if not os.path.isdir(base):
        os.makedirs(base)

    # Garbage-collect the per-process subdirectories of processes that
    # are no longer alive. Names look like 'id_<pid>_<threadid>'.
    for entry in os.listdir(base):
        subdir = os.path.join(base, entry)
        if not (os.path.isdir(subdir) and entry.startswith('id_')):
            continue
        try:
            pid = int(entry.split('_')[1])
        except Exception:
            continue
        if not _is_pid_running(pid):
            _clear_dir(subdir)

    # Each (process, thread) pair gets its own private directory.
    # threading.currentThread is the legacy fallback name.
    if hasattr(threading, 'current_thread'):
        thread_obj = threading.current_thread()
    else:
        thread_obj = threading.currentThread()
    private = os.path.join(base, 'id_%i_%i' % (os.getpid(), id(thread_obj)))
    if not os.path.isdir(private):
        os.mkdir(private)
    return private
def _clear_temp_dir():
    """ Clear the temporary directory.
    """
    # Delete every file in this thread's temp dir, ignoring failures
    # (e.g. files still held open on Windows).
    folder = get_tempdir()
    for entry in os.listdir(folder):
        try:
            os.remove(os.path.join(folder, entry))
        except Exception:
            pass
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
""" Execute the given command in a subprocess and wait for it to finish.
A thread is run that prints output of the process if verbose is True.
"""
# Init flag
interrupted = False
# Create progress
if verbose > 0:
progress = Progress()
stdout = []
def poll_process(p):
while not interrupted:
msg = p.stdout.readline().decode()
if msg:
stdout.append(msg)
if 'error' in msg.lower():
print(msg.rstrip())
if verbose == 1:
progress.reset()
elif verbose > 1:
print(msg.rstrip())
elif verbose == 1:
progress.update(msg)
else:
break
time.sleep(0.01)
#print("thread exit")
# Start process that runs the command
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Keep reading stdout from it
# thread.start_new_thread(poll_process, (p,)) Python 2.x
my_thread = threading.Thread(target=poll_process, args=(p,))
my_thread.setDaemon(True)
my_thread.start()
# Wait here
try:
while p.poll() is None:
time.sleep(0.01)
except KeyboardInterrupt:
# Set flag
interrupted = True
# Kill subprocess
pid = p.pid
if hasattr(os,'kill'):
import signal
os.kill(pid, signal.SIGKILL)
elif sys.platform.startswith('win'):
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
kernel32.TerminateProcess(handle, 0)
#os.system("TASKKILL /PID " + str(pid) + " /F")
# All good?
if interrupted:
raise RuntimeError('Registration process interrupted by the user.')
if p.returncode:
stdout.append(p.stdout.read().decode())
print(''.join(stdout))
raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
    """ Parses Elastix console output and prints a simple progress line
    ('resolution i, iter j').
    """

    def __init__(self):
        self._level = 0
        self.reset()

    def update(self, s):
        # A line like 'Resolution: 2' announces a new resolution level.
        if s.startswith('Resolution:'):
            self._level = self.get_int(s.split(':')[1])
        # Iteration lines are tab-separated, iteration number first.
        if '\t' in s:
            iter_nr = self.get_int(s.split('\t', 1)[0])
            if iter_nr:
                self.show_progress(iter_nr)

    def get_int(self, s):
        # Parse an int; anything unparsable yields 0.
        try:
            return int(s)
        except Exception:
            return 0

    def reset(self):
        self._message = ''
        print()

    def show_progress(self, iter):
        # Erase the previous message with backspaces, then print anew.
        rem = '\b' * (len(self._message) + 1)
        self._message = 'resolution %i, iter %i' % (self._level, iter)
        print(rem + self._message)
# %% The Elastix registration class
def register(im1, im2, params, exact_params=False, verbose=1):
    """ register(im1, im2, params, exact_params=False, verbose=1)

    Perform the registration of `im1` to `im2`, using the given
    parameters. Returns `(im1_deformed, field)`, where `field` is a
    tuple with arrays describing the deformation for each dimension
    (x-y-z order, in world units).

    Parameters:

    * im1 (ndarray or file location):
        The moving image (the one to deform).
    * im2 (ndarray or file location):
        The static (reference) image.
    * params (dict or Parameters):
        The parameters of the registration. Default parameters can be
        obtained using the `get_default_params()` method. Note that any
        parameter known to Elastix can be added to the parameter
        struct, which enables tuning the registration in great detail.
        See `get_default_params()` and the Elastix docs for more info.
    * exact_params (bool):
        If True, use the exact given parameters. If False (default)
        will process the parameters, checking for incompatible
        parameters, extending values to lists if a value needs to be
        given for each dimension.
    * verbose (int):
        Verbosity level. If 0, will not print any progress. If 1, will
        print the progress only. If 2, will print the full output
        produced by the Elastix executable. Note that error messages
        produced by Elastix will be printed regardless of the verbose
        level.

    If `im1` is a list of images, performs a groupwise registration.
    In this case the resulting `field` is a list of fields, each
    indicating the deformation to the "average" image.
    """
    # Clear dir: all intermediate files live in this thread's temp dir.
    tempdir = get_tempdir()
    _clear_temp_dir()

    # Reference image (for groupwise registration, the first of the list)
    refIm = im1
    if isinstance(im1, (tuple,list)):
        refIm = im1[0]

    # Check parameters
    if not exact_params:
        params = _compile_params(params, refIm)
    if isinstance(params, Parameters):
        params = params.as_dict()

    # Groupwise? (im2 None signals groupwise registration)
    if im2 is None:
        # todo: also allow using a constraint on the "last dimension"
        if not isinstance(im1, (tuple,list)):
            raise ValueError('im2 is None, but im1 is not a list.')
        #
        ims = im1
        ndim = ims[0].ndim
        # Create new image that is a combination of all images:
        # the group becomes one (ndim+1)-dimensional stack.
        N = len(ims)
        new_shape = (N,) + ims[0].shape
        im1 = np.zeros(new_shape, ims[0].dtype)
        for i in range(N):
            im1[i] = ims[i]
        # Set parameters required for groupwise (stack) registration.
        #params['UseCyclicTransform'] = True # to be chosen by user
        params['FixedImageDimension'] = im1.ndim
        params['MovingImageDimension'] = im1.ndim
        params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
        params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
        params['Metric'] = 'VarianceOverLastDimensionMetric'
        params['Transform'] = 'BSplineStackTransform'
        params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
        params['SampleLastDimensionRandomly'] = True
        params['NumSamplesLastDimension'] = 5
        params['SubtractMean'] = True
        # No smoothing along that dimenson: schedule 0 for the stack axis,
        # 2**i for the spatial axes of each pyramid level.
        pyramidsamples = []
        for i in range(params['NumberOfResolutions']):
            pyramidsamples.extend( [0]+[2**i]*ndim )
        pyramidsamples.reverse()
        params['ImagePyramidSchedule'] = pyramidsamples

    # Get paths of input images (arrays are serialized to disk here)
    path_im1, path_im2 = _get_image_paths(im1, im2)

    # Determine path of parameter file and write params
    path_params = _write_parameter_file(params)

    # Get path of trafo param file (written by elastix, read by transformix)
    path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')

    # Register
    if True:
        # Compile command to execute
        # NOTE(review): a list is passed with shell=True; on POSIX only the
        # first element is the shell command -- confirm against _system3.
        command = [get_elastix_exes()[0],
                   '-m', path_im1, '-f', path_im2,
                   '-out', tempdir, '-p', path_params]
        if verbose:
            print("Calling Elastix to register images ...")
        _system3(command, verbose)
        # Try and load result
        try:
            a = _read_image_data('result.0.mhd')
        except IOError as why:
            tmp = "An error occured during registration: " + str(why)
            raise RuntimeError(tmp)

    # Find deformation field by running transformix on the transform params
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[1],
                   '-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
        _system3(command, verbose)
        # Try and load result
        try:
            b = _read_image_data('deformationField.mhd')
        except IOError as why:
            tmp = "An error occured during transformation: " + str(why)
            raise RuntimeError(tmp)

    # Get deformation fields (for each image); groupwise results are stacked
    # along the first axis.
    if im2 is None:
        fields = [b[i] for i in range(b.shape[0])]
    else:
        fields = [b]

    # Pull apart deformation fields in multiple images: the last axis holds
    # one component per spatial dimension; split into a tuple of arrays.
    for i in range(len(fields)):
        field = fields[i]
        if field.ndim == 2:
            field = [field[:,d] for d in range(1)]
        elif field.ndim == 3:
            field = [field[:,:,d] for d in range(2)]
        elif field.ndim == 4:
            field = [field[:,:,:,d] for d in range(3)]
        elif field.ndim == 5:
            field = [field[:,:,:,:,d] for d in range(4)]
        fields[i] = tuple(field)
    if im2 is not None:
        fields = fields[0] # For pairwise reg, return 1 field, not a list

    # Clean and return
    _clear_temp_dir()
    return a, fields
def _write_image_data(im, id):
    """ Write a numpy array to disk in the form of a .raw and .mhd file.
    The id is the image sequence number (1 or 2). Returns the path of
    the mhd file.
    """
    # BUGFIX: removed a leftover debug line ``im = im * (1.0/3000)`` that
    # silently rescaled the data and forced every input to float64,
    # defeating the dtype mapping below. Ensure contiguity instead, since
    # ``im.data`` requires a contiguous buffer.
    if not im.flags['C_CONTIGUOUS']:
        im = np.ascontiguousarray(im)

    # Create text (mhd header template; placeholders filled in below)
    lines = ["ObjectType = Image",
             "NDims = <ndim>",
             "BinaryData = True",
             "BinaryDataByteOrderMSB = False",
             "CompressedData = False",
             #"TransformMatrix = <transmatrix>",
             "Offset = <origin>",
             "CenterOfRotation = <centrot>",
             "ElementSpacing = <sampling>",
             "DimSize = <shape>",
             "ElementType = <dtype>",
             "ElementDataFile = <fname>",
             ""]
    text = '\n'.join(lines)

    # Determine file names
    tempdir = get_tempdir()
    fname_raw_ = 'im%i.raw' % id
    fname_raw = os.path.join(tempdir, fname_raw_)
    fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)

    # Get shape, sampling and origin (Image instances carry metadata)
    shape = im.shape
    if hasattr(im, 'sampling'):
        sampling = im.sampling
    else:
        sampling = [1 for s in im.shape]
    if hasattr(im, 'origin'):
        origin = im.origin
    else:
        origin = [0 for s in im.shape]

    # Make all shape stuff in x-y-z order and make it string
    shape = ' '.join([str(s) for s in reversed(shape)])
    sampling = ' '.join([str(s) for s in reversed(sampling)])
    origin = ' '.join([str(s) for s in reversed(origin)])

    # Get data type
    dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
    if dtype_itk is None:
        raise ValueError('Cannot convert data of this type: ' + str(im.dtype))

    # Set mhd text
    text = text.replace('<fname>', fname_raw_)
    text = text.replace('<ndim>', str(im.ndim))
    text = text.replace('<shape>', shape)
    text = text.replace('<sampling>', sampling)
    text = text.replace('<origin>', origin)
    text = text.replace('<dtype>', dtype_itk)
    text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
    if im.ndim == 2:
        text = text.replace('<transmatrix>', '1 0 0 1')
    elif im.ndim == 3:
        text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
    elif im.ndim == 4:
        pass  # the TransformMatrix line is commented out above anyway

    # Write data file, then the mhd header (context managers close reliably)
    with open(fname_raw, 'wb') as f:
        f.write(im.data)
    with open(fname_mhd, 'wb') as f:
        f.write(text.encode('utf-8'))

    # Done, return path of mhd file
    return fname_mhd
def _read_image_data(mhd_file):
    """ Read the resulting image data and return it as a numpy array.

    ``mhd_file`` is a filename relative to the pyelastix temp dir.
    Returns an Image (ndarray subclass) with ``sampling`` and ``origin``
    attributes in z-y-x order.
    """
    tempdir = get_tempdir()

    # Load description from mhd file.
    # BUGFIX: use context managers -- the original ``open(...).read()``
    # calls leaked file handles (keeps files locked on Windows).
    fname = tempdir + '/' + mhd_file
    with open(fname, 'r') as f:
        des = f.read()

    # Get data filename and load raw data
    match = re.findall('ElementDataFile = (.+?)\n', des)
    fname = tempdir + '/' + match[0]
    with open(fname, 'rb') as f:
        data = f.read()

    # Determine dtype
    match = re.findall('ElementType = (.+?)\n', des)
    dtype_itk = match[0].upper().strip()
    dtype = DTYPE_ITK2NP.get(dtype_itk, None)
    if dtype is None:
        raise RuntimeError('Unknown ElementType: ' + dtype_itk)

    # Create numpy array (zero-copy view on the raw bytes)
    a = np.frombuffer(data, dtype=dtype)

    # Determine shape, sampling and origin of the data
    match = re.findall('DimSize = (.+?)\n', des)
    shape = [int(i) for i in match[0].split(' ')]
    #
    match = re.findall('ElementSpacing = (.+?)\n', des)
    sampling = [float(i) for i in match[0].split(' ')]
    #
    match = re.findall('Offset = (.+?)\n', des)
    origin = [float(i) for i in match[0].split(' ')]

    # Reverse shape stuff to make z-y-x order
    shape = [s for s in reversed(shape)]
    sampling = [s for s in reversed(sampling)]
    origin = [s for s in reversed(origin)]

    # Take vectors/colours into account: deformation fields carry an
    # extra trailing dimension with one component per axis.
    N = np.prod(shape)
    if N != a.size:
        extraDim = int(a.size / N)
        shape = tuple(shape) + (extraDim,)
        sampling = tuple(sampling) + (1.0,)
        origin = tuple(origin) + (0,)

    # Check shape
    N = np.prod(shape)
    if N != a.size:
        raise RuntimeError('Cannot apply shape to data.')
    else:
        a.shape = shape
        a = Image(a)
        a.sampling = sampling
        a.origin = origin
    return a
class Image(np.ndarray):
    """ ndarray subclass used to attach ``sampling`` and ``origin``
    metadata to image data read from disk.
    """

    def __new__(cls, array):
        try:
            return array.view(cls)
        except AttributeError:  # pragma: no cover
            # Just return the original; no metadata on the array in Pypy!
            return array
# %% Code related to parameters
class Parameters:
    """ Struct object to represent the parameters for the Elastix
    registration toolkit. Sets of parameters can be combined by
    addition. (When adding `p1 + p2`, any parameters present in both
    objects will take the value that the parameter has in `p2`.)

    Use `get_default_params()` to get a Parameters struct with sensible
    default values.
    """

    def as_dict(self):
        """ Returns the parameters as a dictionary.
        """
        return dict(self.__dict__)

    def __repr__(self):
        return '<Parameters instance with %i parameters>' % len(self.__dict__)

    def __str__(self):
        # Right-align keys on the longest name; truncate long values so
        # each line stays under 80 characters.
        width = max([0] + [len(key) for key in self.__dict__])
        chars_left = 79 - (width + 6)
        lines = ['<%i parameters>' % len(self.__dict__)]
        for key in self.__dict__.keys():
            value_str = repr(self.__dict__[key])
            if len(value_str) > chars_left:
                value_str = value_str[:chars_left - 3] + '...'
            lines.append(key.rjust(width + 4) + ": %s" % (value_str))
        return '\n'.join(lines) + '\n'

    def __add__(self, other):
        # Later operand wins for keys present in both.
        combined = Parameters()
        combined.__dict__.update(self.__dict__)
        combined.__dict__.update(other.__dict__)
        return combined
def _get_fixed_params(im):
    """ Parameters that the user has no influence on. Mostly chosen
    based on the input images.
    """
    p = Parameters()
    # File-path inputs carry no dtype/ndim information; return empty params.
    if not isinstance(im, np.ndarray):
        return p
    # Dimension of the inputs
    p.FixedImageDimension = im.ndim
    p.MovingImageDimension = im.ndim
    # Always write result, so I can verify
    p.WriteResultImage = True
    # Write the result as mhd, in the same pixel type as the input
    # (e.g. 'MET_UCHAR' -> 'uchar').
    itk_type = DTYPE_NP2ITK[im.dtype.name]
    p.ResultImagePixelType = itk_type.split('_')[-1].lower()
    p.ResultImageFormat = "mhd"
    return p
def get_advanced_params():
    """ Get `Parameters` struct with parameters that most users do not
    want to think about.
    """
    p = Parameters()

    # Internal format used during the registration process
    p.FixedInternalImagePixelType = "float"
    p.MovingInternalImagePixelType = "float"

    # Image direction
    p.UseDirectionCosines = True

    # In almost all cases you'd want multi resolution
    p.Registration = 'MultiResolutionRegistration'

    # Pyramid options: the *Recursive* pyramids downsample the images,
    # the *Smoothing* pyramids only smooth them.
    p.FixedImagePyramid = "FixedRecursiveImagePyramid"
    p.MovingImagePyramid = "MovingRecursiveImagePyramid"

    # Whether transforms are combined by composition or by addition;
    # it does not influence the results very much.
    p.HowToCombineTransforms = "Compose"

    # For out of range pixels
    p.DefaultPixelValue = 0

    # Interpolator used during registration (order 1 = linear) ...
    p.Interpolator = "BSplineInterpolator"
    p.BSplineInterpolationOrder = 1
    # ... and for resampling the final result (order 3 = cubic).
    p.ResampleInterpolator = "FinalBSplineInterpolator"
    p.FinalBSplineInterpolationOrder = 3

    # According to the manual, there is currently only one resampler
    p.Resampler = "DefaultResampler"
    return p
def get_default_params(type='BSPLINE'):
    """ get_default_params(type='BSPLINE')

    Get `Parameters` struct with parameters that users may want to tweak.
    The given `type` specifies the type of allowed transform, and can
    be 'RIGID', 'AFFINE', 'BSPLINE'.

    For detail on what parameters are available and how they should be used,
    we refer to the Elastix documentation. Here is a description of the
    most common parameters:

    * Transform (str):
        Can be 'BSplineTransform', 'EulerTransform', or
        'AffineTransform'. The transformation to apply. Chosen based on `type`.
    * FinalGridSpacingInPhysicalUnits (int):
        When using the BSplineTransform, the final spacing of the grid.
        This controls the smoothness of the final deformation.
    * AutomaticScalesEstimation (bool):
        When using a rigid or affine transform. Scales the affine matrix
        elements compared to the translations, to make sure they are in
        the same range. In general, it's best to use automatic scales
        estimation.
    * AutomaticTransformInitialization (bool):
        When using a rigid or affine transform. Automatically guess an
        initial translation by aligning the geometric centers of the
        fixed and moving.
    * NumberOfResolutions (int):
        Most registration algorithms adopt a multiresolution approach
        to direct the solution towards a global optimum and to speed
        up the process. This parameter specifies the number of scales
        to apply the registration at. (default 4)
    * MaximumNumberOfIterations (int):
        Maximum number of iterations in each resolution level.
        200-2000 works usually fine for nonrigid registration.
        The more, the better, but the longer computation time.
        This is an important parameter! (default 500).
    """
    # Init
    p = Parameters()
    type = type.upper()

    # ===== Metric to use =====
    p.Metric = 'AdvancedMattesMutualInformation'
    # Number of grey level bins in each resolution level,
    # for the mutual information. 16 or 32 usually works fine.
    # sets default value for NumberOf[Fixed/Moving]HistogramBins
    p.NumberOfHistogramBins = 32
    # Taking samples for mutual information
    p.ImageSampler = 'RandomCoordinate'
    p.NumberOfSpatialSamples = 2048
    p.NewSamplesEveryIteration = True

    # ====== Transform to use ======
    # The number of levels in the image pyramid
    p.NumberOfResolutions = 4

    if type in ['B', 'BSPLINE', 'B-SPLINE']:
        # Bspline transform
        p.Transform = 'BSplineTransform'
        # The final grid spacing (at the smallest level)
        p.FinalGridSpacingInPhysicalUnits = 16

    if type in ['RIGID', 'EULER', 'AFFINE']:
        # Affine or Euler transform
        if type in ['RIGID', 'EULER']:
            p.Transform = 'EulerTransform'
        else:
            p.Transform = 'AffineTransform'
        # Scales the affine matrix elements compared to the translations,
        # to make sure they are in the same range. In general, it's best to
        # use automatic scales estimation.
        p.AutomaticScalesEstimation = True
        # Automatically guess an initial translation by aligning the
        # geometric centers of the fixed and moving.
        p.AutomaticTransformInitialization = True

    # ===== Optimizer to use =====
    p.Optimizer = 'AdaptiveStochasticGradientDescent'
    # Maximum number of iterations in each resolution level:
    # 200-2000 works usually fine for nonrigid registration.
    # The more, the better, but the longer computation time.
    # This is an important parameter!
    p.MaximumNumberOfIterations = 500
    # The step size of the optimizer, in mm. By default the voxel size is used.
    # which usually works well. In case of unusual high-resolution images
    # (eg histology) it is necessary to increase this value a bit, to the size
    # of the "smallest visible structure" in the image:
    #p.MaximumStepLength = 1.0 Default uses voxel spaceing
    # Another optional parameter for the AdaptiveStochasticGradientDescent
    #p.SigmoidInitialTime = 4.0

    # ===== Also interesting parameters =====
    #p.FinalGridSpacingInVoxels = 16
    #p.GridSpacingSchedule = [4.0, 4.0, 2.0, 1.0]
    #p.ImagePyramidSchedule = [8 8 4 4 2 2 1 1]
    #p.ErodeMask = "false"

    # Done
    return p
def _compile_params(params, im1):
    """ Compile the params dictionary:
    * Combine parameters from different sources
    * Perform checks to prevent non-compatible parameters
    * Extend parameters that need a list with one element per dimension
    """
    # Later addends take precedence, so the user-supplied params
    # override the advanced and fixed defaults.
    combined = _get_fixed_params(im1) + get_advanced_params() + params
    params = combined.as_dict()

    # Grid spacing must be specified per dimension; broadcast scalar
    # values (possible only when ndim is known, i.e. ndarray input).
    if isinstance(im1, np.ndarray):
        for key in ('FinalGridSpacingInPhysicalUnits',
                    'FinalGridSpacingInVoxels'):
            if key in params and not isinstance(params[key], (list, tuple)):
                params[key] = [params[key]] * im1.ndim

    # The two spacing parameters are mutually exclusive; voxel units win.
    if 'FinalGridSpacingInVoxels' in params and \
            'FinalGridSpacingInPhysicalUnits' in params:
        params.pop('FinalGridSpacingInPhysicalUnits')
    return params
def _write_parameter_file(params):
    """ Write the parameter file in the format that elastix likes.

    ``params`` maps parameter names to bool, int, float, str, or a
    list/tuple of those. Returns the path of the written ``params.txt``
    in the pyelastix temp directory.
    """
    # Get path
    path = os.path.join(get_tempdir(), 'params.txt')

    # Define helper function
    def valToStr(val):
        # BUGFIX: test bool *before* int. The previous check
        # ``val in [True, False]`` also matched the ints 0 and 1
        # (0 == False), so e.g. DefaultPixelValue = 0 was written as
        # "false" instead of 0.
        if isinstance(val, bool):
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            tmp = str(val)
            # Elastix wants an explicit decimal point, but do not
            # mangle exponent notation ('1e-05' must not become
            # '1e-05.0').
            if '.' not in tmp and 'e' not in tmp and 'E' not in tmp:
                tmp += '.0'
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val
        # Previously an unsupported type returned None and crashed later
        # during string joining; fail early with a clear message instead.
        raise ValueError('Cannot represent parameter value %r' % (val,))

    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # Create line and add
        line = '(%s %s)' % (key, val_)
        text += line + '\n'

    # Write text
    with open(path, 'wb') as f:
        f.write(text.encode('utf-8'))
    # Done
    return path
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.