repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
jvarho/pylibscrypt
|
pylibscrypt/pylibscrypt.py
|
scrypt_mcf
|
python
|
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
if (prefix != SCRYPT_MCF_PREFIX_s1 and prefix != SCRYPT_MCF_PREFIX_ANY):
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if salt is None:
salt = os.urandom(16)
elif not (1 <= len(salt) <= 16):
raise ValueError('salt must be 1-16 bytes')
if N > 2**31:
raise ValueError('N > 2**31 not supported')
if b'\0' in password:
raise ValueError('scrypt_mcf password must not contain zero bytes')
hash = scrypt(password, salt, N, r, p)
h64 = base64.b64encode(hash)
s64 = base64.b64encode(salt)
out = ctypes.create_string_buffer(125)
ret = _libscrypt_mcf(N, r, p, s64, h64, out)
if not ret:
raise ValueError
out = out.raw.strip(b'\0')
# XXX: Hack to support old libscrypt (like in Ubuntu 14.04)
if len(out) == 123:
out = out + b'='
return out
|
Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/pylibscrypt.py#L101-L142
|
[
"def scrypt_mcf(scrypt, password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,\n prefix=SCRYPT_MCF_PREFIX_DEFAULT):\n \"\"\"Derives a Modular Crypt Format hash using the scrypt KDF given\n\n Expects the signature:\n scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64)\n\n If no salt is given, a random salt of 128+ bits is used. (Recommended.)\n \"\"\"\n if isinstance(password, unicode):\n password = password.encode('utf8')\n elif not isinstance(password, bytes):\n raise TypeError('password must be a unicode or byte string')\n if salt is not None and not isinstance(salt, bytes):\n raise TypeError('salt must be a byte string')\n if salt is not None and not (1 <= len(salt) <= 16):\n raise ValueError('salt must be 1-16 bytes')\n if r > 255:\n raise ValueError('scrypt_mcf r out of range [1,255]')\n if p > 255:\n raise ValueError('scrypt_mcf p out of range [1,255]')\n if N > 2**31:\n raise ValueError('scrypt_mcf N out of range [2,2**31]')\n if b'\\0' in password:\n raise ValueError('scrypt_mcf password must not contain zero bytes')\n\n if prefix == SCRYPT_MCF_PREFIX_s1:\n if salt is None:\n salt = os.urandom(16)\n hash = scrypt(password, salt, N, r, p)\n return _scrypt_mcf_encode_s1(N, r, p, salt, hash)\n elif prefix == SCRYPT_MCF_PREFIX_7 or prefix == SCRYPT_MCF_PREFIX_ANY:\n if salt is None:\n salt = os.urandom(32)\n salt = _cb64enc(salt)\n hash = scrypt(password, salt, N, r, p, 32)\n return _scrypt_mcf_encode_7(N, r, p, salt, hash)\n else:\n raise ValueError(\"Unrecognized MCF format\")\n"
] |
# Copyright (c) 2014-2018, Jan Varho
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Scrypt implementation that calls into system libscrypt"""
import base64
import ctypes
from ctypes import c_char_p, c_size_t, c_uint64, c_uint32
from ctypes.util import find_library
import os
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_s1,
SCRYPT_MCF_PREFIX_DEFAULT, SCRYPT_MCF_PREFIX_ANY, check_args, unicode)
from . import mcf as mcf_mod
_libscrypt_soname = find_library('scrypt')
if _libscrypt_soname is None:
raise ImportError('Unable to find libscrypt')
try:
_libscrypt = ctypes.CDLL(_libscrypt_soname)
_libscrypt_scrypt = _libscrypt.libscrypt_scrypt
_libscrypt_mcf = _libscrypt.libscrypt_mcf
_libscrypt_check = _libscrypt.libscrypt_check
except OSError:
raise ImportError('Unable to load libscrypt: ' + _libscrypt_soname)
except AttributeError:
raise ImportError('Incompatible libscrypt: ' + _libscrypt_soname)
_libscrypt_scrypt.argtypes = [
c_char_p, # password
c_size_t, # password length
c_char_p, # salt
c_size_t, # salt length
c_uint64, # N
c_uint32, # r
c_uint32, # p
c_char_p, # out
c_size_t, # out length
]
_libscrypt_mcf.argtypes = [
c_uint64, # N
c_uint32, # r
c_uint32, # p
c_char_p, # salt
c_char_p, # hash
c_char_p, # out (125+ bytes)
]
_libscrypt_check.argtypes = [
c_char_p, # mcf (modified)
c_char_p, # hash
]
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
"""Returns a key derived using the scrypt key-derivarion function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100ms of a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
"""
check_args(password, salt, N, r, p, olen)
out = ctypes.create_string_buffer(olen)
ret = _libscrypt_scrypt(password, len(password), salt, len(salt),
N, r, p, out, len(out))
if ret:
raise ValueError
return out.raw
def scrypt_mcf_check(mcf, password):
"""Returns True if the password matches the given MCF hash"""
if not isinstance(mcf, bytes):
raise TypeError('MCF must be a byte string')
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if len(mcf) != 124 or b'\0' in password:
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
mcfbuf = ctypes.create_string_buffer(mcf)
ret = _libscrypt_check(mcfbuf, password)
if ret < 0:
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
return bool(ret)
if __name__ == "__main__":
import sys
from . import tests
tests.run_scrypt_suite(sys.modules[__name__])
|
jvarho/pylibscrypt
|
pylibscrypt/pylibscrypt.py
|
scrypt_mcf_check
|
python
|
def scrypt_mcf_check(mcf, password):
if not isinstance(mcf, bytes):
raise TypeError('MCF must be a byte string')
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if len(mcf) != 124 or b'\0' in password:
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
mcfbuf = ctypes.create_string_buffer(mcf)
ret = _libscrypt_check(mcfbuf, password)
if ret < 0:
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
return bool(ret)
|
Returns True if the password matches the given MCF hash
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/pylibscrypt.py#L145-L161
|
[
"def scrypt_mcf_check(scrypt, mcf, password):\n \"\"\"Returns True if the password matches the given MCF hash\n\n Supports both the libscrypt $s1$ format and the $7$ format.\n \"\"\"\n if not isinstance(mcf, bytes):\n raise TypeError('MCF must be a byte string')\n if isinstance(password, unicode):\n password = password.encode('utf8')\n elif not isinstance(password, bytes):\n raise TypeError('password must be a unicode or byte string')\n\n N, r, p, salt, hash, hlen = _scrypt_mcf_decode(mcf)\n h = scrypt(password, salt, N=N, r=r, p=p, olen=hlen)\n cmp = 0\n for i, j in zip(bytearray(h), bytearray(hash)):\n cmp |= i ^ j\n return cmp == 0\n"
] |
# Copyright (c) 2014-2018, Jan Varho
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Scrypt implementation that calls into system libscrypt"""
import base64
import ctypes
from ctypes import c_char_p, c_size_t, c_uint64, c_uint32
from ctypes.util import find_library
import os
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_s1,
SCRYPT_MCF_PREFIX_DEFAULT, SCRYPT_MCF_PREFIX_ANY, check_args, unicode)
from . import mcf as mcf_mod
_libscrypt_soname = find_library('scrypt')
if _libscrypt_soname is None:
raise ImportError('Unable to find libscrypt')
try:
_libscrypt = ctypes.CDLL(_libscrypt_soname)
_libscrypt_scrypt = _libscrypt.libscrypt_scrypt
_libscrypt_mcf = _libscrypt.libscrypt_mcf
_libscrypt_check = _libscrypt.libscrypt_check
except OSError:
raise ImportError('Unable to load libscrypt: ' + _libscrypt_soname)
except AttributeError:
raise ImportError('Incompatible libscrypt: ' + _libscrypt_soname)
_libscrypt_scrypt.argtypes = [
c_char_p, # password
c_size_t, # password length
c_char_p, # salt
c_size_t, # salt length
c_uint64, # N
c_uint32, # r
c_uint32, # p
c_char_p, # out
c_size_t, # out length
]
_libscrypt_mcf.argtypes = [
c_uint64, # N
c_uint32, # r
c_uint32, # p
c_char_p, # salt
c_char_p, # hash
c_char_p, # out (125+ bytes)
]
_libscrypt_check.argtypes = [
c_char_p, # mcf (modified)
c_char_p, # hash
]
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
"""Returns a key derived using the scrypt key-derivarion function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100ms of a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
"""
check_args(password, salt, N, r, p, olen)
out = ctypes.create_string_buffer(olen)
ret = _libscrypt_scrypt(password, len(password), salt, len(salt),
N, r, p, out, len(out))
if ret:
raise ValueError
return out.raw
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
if (prefix != SCRYPT_MCF_PREFIX_s1 and prefix != SCRYPT_MCF_PREFIX_ANY):
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if salt is None:
salt = os.urandom(16)
elif not (1 <= len(salt) <= 16):
raise ValueError('salt must be 1-16 bytes')
if N > 2**31:
raise ValueError('N > 2**31 not supported')
if b'\0' in password:
raise ValueError('scrypt_mcf password must not contain zero bytes')
hash = scrypt(password, salt, N, r, p)
h64 = base64.b64encode(hash)
s64 = base64.b64encode(salt)
out = ctypes.create_string_buffer(125)
ret = _libscrypt_mcf(N, r, p, s64, h64, out)
if not ret:
raise ValueError
out = out.raw.strip(b'\0')
# XXX: Hack to support old libscrypt (like in Ubuntu 14.04)
if len(out) == 123:
out = out + b'='
return out
if __name__ == "__main__":
import sys
from . import tests
tests.run_scrypt_suite(sys.modules[__name__])
|
JarryShaw/DictDumper
|
src/plist.py
|
PLIST._append_value
|
python
|
def _append_value(self, value, _file, _name):
_tabs = '\t' * self._tctr
_keys = '{tabs}<key>{name}</key>\n'.format(tabs=_tabs, name=_name)
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._append_dict(value, _file)
|
Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/plist.py#L151-L165
| null |
class PLIST(XML):
"""Dump Apple property list (PLIST) format file.
Usage:
>>> dumper = PLIST(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'plist'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
value ::= array | dict | string | data
| date | integer | real | bool
array ::= "<array>" value* "</array>"
dict ::= "<dict>" ("<key>" str "</key>" value)* "</dict>"
string ::= "<string>" str "</string>"
data ::= "<data>" bytes "</data>"
date ::= "<date>" datetime "</date>"
integer ::= "<integer>" int "</integer>"
real ::= "<real>" float "</real>"
bool ::= "<true/>" | "<false/>"
"""
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'plist'
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # dict
datetime.date, # date
int, # integer
float, # real
bytes, bytearray, memoryview, # data
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_hsrt = _HEADER_START
_hend = _HEADER_END
##########################################################################
# Utilities.
##########################################################################
def _append_array(self, value, _file):
"""Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_labs = '{tabs}<array>\n'.format(tabs=_tabs)
_file.write(_labs)
self._tctr += 1
for _item in value:
if _item is None:
continue
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '{tabs}</array>\n'.format(tabs=_tabs)
_file.write(_labs)
def _append_dict(self, value, _file):
"""Call this function to write dict contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_labs = '{tabs}<dict>\n'.format(tabs=_tabs)
_file.write(_labs)
self._tctr += 1
for (_item, _text) in value.items():
if _text is None:
continue
_tabs = '\t' * self._tctr
_keys = '{tabs}<key>{item}</key>\n'.format(tabs=_tabs, item=_item)
_file.write(_keys)
_text = self.object_hook(_text)
_type = type(_text).__name__
_MAGIC_TYPES[_type](self, _text, _file)
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '{tabs}</dict>\n'.format(tabs=_tabs)
_file.write(_labs)
def _append_string(self, value, _file):
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<string>{text}</string>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_data(self, value, _file):
"""Call this function to write data contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
_tabs = '\t' * self._tctr
_text = base64.b64encode(value).decode() # value.hex() # str(value)[2:-1]
_labs = '{tabs}<data>{text}</data>\n'.format(tabs=_tabs, text=_text)
# _labs = '{tabs}<data>\n'.format(tabs=_tabs)
# _list = []
# for _item in textwrap.wrap(value.hex(), 32):
# _text = ' '.join(textwrap.wrap(_item, 2))
# _item = '{tabs}\t{text}'.format(tabs=_tabs, text=_text)
# _list.append(_item)
# _labs += '\n'.join(_list)
# _data = [H for H in iter(
# functools.partial(io.StringIO(value.hex()).read, 2), '')
# ] # to split bytes string into length-2 hex string list
# _labs += '\n{tabs}</data>\n'.format(tabs=_tabs)
_file.write(_labs)
def _append_date(self, value, _file):
"""Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
_labs = '{tabs}<date>{text}</date>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_integer(self, value, _file):
"""Call this function to write integer contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<integer>{text}</integer>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_real(self, value, _file):
"""Call this function to write real contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<real>{text}</real>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_bool(self, value, _file):
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = '<true/>' if value else '<false/>'
_labs = '{tabs}{text}\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/plist.py
|
PLIST._append_array
|
python
|
def _append_array(self, value, _file):
_tabs = '\t' * self._tctr
_labs = '{tabs}<array>\n'.format(tabs=_tabs)
_file.write(_labs)
self._tctr += 1
for _item in value:
if _item is None:
continue
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '{tabs}</array>\n'.format(tabs=_tabs)
_file.write(_labs)
|
Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/plist.py#L167-L190
| null |
class PLIST(XML):
"""Dump Apple property list (PLIST) format file.
Usage:
>>> dumper = PLIST(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'plist'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
value ::= array | dict | string | data
| date | integer | real | bool
array ::= "<array>" value* "</array>"
dict ::= "<dict>" ("<key>" str "</key>" value)* "</dict>"
string ::= "<string>" str "</string>"
data ::= "<data>" bytes "</data>"
date ::= "<date>" datetime "</date>"
integer ::= "<integer>" int "</integer>"
real ::= "<real>" float "</real>"
bool ::= "<true/>" | "<false/>"
"""
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'plist'
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # dict
datetime.date, # date
int, # integer
float, # real
bytes, bytearray, memoryview, # data
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_hsrt = _HEADER_START
_hend = _HEADER_END
##########################################################################
# Utilities.
##########################################################################
def _append_value(self, value, _file, _name):
"""Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
"""
_tabs = '\t' * self._tctr
_keys = '{tabs}<key>{name}</key>\n'.format(tabs=_tabs, name=_name)
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._append_dict(value, _file)
def _append_dict(self, value, _file):
"""Call this function to write dict contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_labs = '{tabs}<dict>\n'.format(tabs=_tabs)
_file.write(_labs)
self._tctr += 1
for (_item, _text) in value.items():
if _text is None:
continue
_tabs = '\t' * self._tctr
_keys = '{tabs}<key>{item}</key>\n'.format(tabs=_tabs, item=_item)
_file.write(_keys)
_text = self.object_hook(_text)
_type = type(_text).__name__
_MAGIC_TYPES[_type](self, _text, _file)
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '{tabs}</dict>\n'.format(tabs=_tabs)
_file.write(_labs)
def _append_string(self, value, _file):
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<string>{text}</string>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_data(self, value, _file):
"""Call this function to write data contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
_tabs = '\t' * self._tctr
_text = base64.b64encode(value).decode() # value.hex() # str(value)[2:-1]
_labs = '{tabs}<data>{text}</data>\n'.format(tabs=_tabs, text=_text)
# _labs = '{tabs}<data>\n'.format(tabs=_tabs)
# _list = []
# for _item in textwrap.wrap(value.hex(), 32):
# _text = ' '.join(textwrap.wrap(_item, 2))
# _item = '{tabs}\t{text}'.format(tabs=_tabs, text=_text)
# _list.append(_item)
# _labs += '\n'.join(_list)
# _data = [H for H in iter(
# functools.partial(io.StringIO(value.hex()).read, 2), '')
# ] # to split bytes string into length-2 hex string list
# _labs += '\n{tabs}</data>\n'.format(tabs=_tabs)
_file.write(_labs)
def _append_date(self, value, _file):
"""Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
_labs = '{tabs}<date>{text}</date>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_integer(self, value, _file):
"""Call this function to write integer contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<integer>{text}</integer>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_real(self, value, _file):
"""Call this function to write real contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<real>{text}</real>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_bool(self, value, _file):
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = '<true/>' if value else '<false/>'
_labs = '{tabs}{text}\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/plist.py
|
PLIST._append_dict
|
python
|
def _append_dict(self, value, _file):
_tabs = '\t' * self._tctr
_labs = '{tabs}<dict>\n'.format(tabs=_tabs)
_file.write(_labs)
self._tctr += 1
for (_item, _text) in value.items():
if _text is None:
continue
_tabs = '\t' * self._tctr
_keys = '{tabs}<key>{item}</key>\n'.format(tabs=_tabs, item=_item)
_file.write(_keys)
_text = self.object_hook(_text)
_type = type(_text).__name__
_MAGIC_TYPES[_type](self, _text, _file)
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '{tabs}</dict>\n'.format(tabs=_tabs)
_file.write(_labs)
|
Call this function to write dict contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/plist.py#L192-L220
| null |
class PLIST(XML):
"""Dump Apple property list (PLIST) format file.
Usage:
>>> dumper = PLIST(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'plist'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
value ::= array | dict | string | data
| date | integer | real | bool
array ::= "<array>" value* "</array>"
dict ::= "<dict>" ("<key>" str "</key>" value)* "</dict>"
string ::= "<string>" str "</string>"
data ::= "<data>" bytes "</data>"
date ::= "<date>" datetime "</date>"
integer ::= "<integer>" int "</integer>"
real ::= "<real>" float "</real>"
bool ::= "<true/>" | "<false/>"
"""
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'plist'
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # dict
datetime.date, # date
int, # integer
float, # real
bytes, bytearray, memoryview, # data
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_hsrt = _HEADER_START
_hend = _HEADER_END
##########################################################################
# Utilities.
##########################################################################
def _append_value(self, value, _file, _name):
"""Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
"""
_tabs = '\t' * self._tctr
_keys = '{tabs}<key>{name}</key>\n'.format(tabs=_tabs, name=_name)
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._append_dict(value, _file)
def _append_array(self, value, _file):
"""Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_labs = '{tabs}<array>\n'.format(tabs=_tabs)
_file.write(_labs)
self._tctr += 1
for _item in value:
if _item is None:
continue
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '{tabs}</array>\n'.format(tabs=_tabs)
_file.write(_labs)
def _append_string(self, value, _file):
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<string>{text}</string>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_data(self, value, _file):
"""Call this function to write data contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
_tabs = '\t' * self._tctr
_text = base64.b64encode(value).decode() # value.hex() # str(value)[2:-1]
_labs = '{tabs}<data>{text}</data>\n'.format(tabs=_tabs, text=_text)
# _labs = '{tabs}<data>\n'.format(tabs=_tabs)
# _list = []
# for _item in textwrap.wrap(value.hex(), 32):
# _text = ' '.join(textwrap.wrap(_item, 2))
# _item = '{tabs}\t{text}'.format(tabs=_tabs, text=_text)
# _list.append(_item)
# _labs += '\n'.join(_list)
# _data = [H for H in iter(
# functools.partial(io.StringIO(value.hex()).read, 2), '')
# ] # to split bytes string into length-2 hex string list
# _labs += '\n{tabs}</data>\n'.format(tabs=_tabs)
_file.write(_labs)
def _append_date(self, value, _file):
"""Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
_labs = '{tabs}<date>{text}</date>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_integer(self, value, _file):
"""Call this function to write integer contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<integer>{text}</integer>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_real(self, value, _file):
"""Call this function to write real contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<real>{text}</real>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_bool(self, value, _file):
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = '<true/>' if value else '<false/>'
_labs = '{tabs}{text}\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/plist.py
|
PLIST._append_data
|
python
|
def _append_data(self, value, _file):
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
_tabs = '\t' * self._tctr
_text = base64.b64encode(value).decode() # value.hex() # str(value)[2:-1]
_labs = '{tabs}<data>{text}</data>\n'.format(tabs=_tabs, text=_text)
# _labs = '{tabs}<data>\n'.format(tabs=_tabs)
# _list = []
# for _item in textwrap.wrap(value.hex(), 32):
# _text = ' '.join(textwrap.wrap(_item, 2))
# _item = '{tabs}\t{text}'.format(tabs=_tabs, text=_text)
# _list.append(_item)
# _labs += '\n'.join(_list)
# _data = [H for H in iter(
# functools.partial(io.StringIO(value.hex()).read, 2), '')
# ] # to split bytes string into length-2 hex string list
# _labs += '\n{tabs}</data>\n'.format(tabs=_tabs)
_file.write(_labs)
|
Call this function to write data contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/plist.py#L235-L262
| null |
class PLIST(XML):
"""Dump Apple property list (PLIST) format file.
Usage:
>>> dumper = PLIST(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'plist'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
value ::= array | dict | string | data
| date | integer | real | bool
array ::= "<array>" value* "</array>"
dict ::= "<dict>" ("<key>" str "</key>" value)* "</dict>"
string ::= "<string>" str "</string>"
data ::= "<data>" bytes "</data>"
date ::= "<date>" datetime "</date>"
integer ::= "<integer>" int "</integer>"
real ::= "<real>" float "</real>"
bool ::= "<true/>" | "<false/>"
"""
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'plist'
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # dict
datetime.date, # date
int, # integer
float, # real
bytes, bytearray, memoryview, # data
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_hsrt = _HEADER_START
_hend = _HEADER_END
##########################################################################
# Utilities.
##########################################################################
def _append_value(self, value, _file, _name):
"""Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
"""
_tabs = '\t' * self._tctr
_keys = '{tabs}<key>{name}</key>\n'.format(tabs=_tabs, name=_name)
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._append_dict(value, _file)
def _append_array(self, value, _file):
"""Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_labs = '{tabs}<array>\n'.format(tabs=_tabs)
_file.write(_labs)
self._tctr += 1
for _item in value:
if _item is None:
continue
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '{tabs}</array>\n'.format(tabs=_tabs)
_file.write(_labs)
def _append_dict(self, value, _file):
"""Call this function to write dict contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_labs = '{tabs}<dict>\n'.format(tabs=_tabs)
_file.write(_labs)
self._tctr += 1
for (_item, _text) in value.items():
if _text is None:
continue
_tabs = '\t' * self._tctr
_keys = '{tabs}<key>{item}</key>\n'.format(tabs=_tabs, item=_item)
_file.write(_keys)
_text = self.object_hook(_text)
_type = type(_text).__name__
_MAGIC_TYPES[_type](self, _text, _file)
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '{tabs}</dict>\n'.format(tabs=_tabs)
_file.write(_labs)
def _append_string(self, value, _file):
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<string>{text}</string>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_date(self, value, _file):
"""Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
_labs = '{tabs}<date>{text}</date>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_integer(self, value, _file):
"""Call this function to write integer contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<integer>{text}</integer>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_real(self, value, _file):
"""Call this function to write real contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<real>{text}</real>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_bool(self, value, _file):
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = '<true/>' if value else '<false/>'
_labs = '{tabs}{text}\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/plist.py
|
PLIST._append_date
|
python
|
def _append_date(self, value, _file):
_tabs = '\t' * self._tctr
_text = value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
_labs = '{tabs}<date>{text}</date>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
|
Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/plist.py#L264-L275
| null |
class PLIST(XML):
"""Dump Apple property list (PLIST) format file.
Usage:
>>> dumper = PLIST(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'plist'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
value ::= array | dict | string | data
| date | integer | real | bool
array ::= "<array>" value* "</array>"
dict ::= "<dict>" ("<key>" str "</key>" value)* "</dict>"
string ::= "<string>" str "</string>"
data ::= "<data>" bytes "</data>"
date ::= "<date>" datetime "</date>"
integer ::= "<integer>" int "</integer>"
real ::= "<real>" float "</real>"
bool ::= "<true/>" | "<false/>"
"""
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'plist'
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # dict
datetime.date, # date
int, # integer
float, # real
bytes, bytearray, memoryview, # data
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_hsrt = _HEADER_START
_hend = _HEADER_END
##########################################################################
# Utilities.
##########################################################################
def _append_value(self, value, _file, _name):
"""Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
"""
_tabs = '\t' * self._tctr
_keys = '{tabs}<key>{name}</key>\n'.format(tabs=_tabs, name=_name)
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._append_dict(value, _file)
def _append_array(self, value, _file):
"""Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_labs = '{tabs}<array>\n'.format(tabs=_tabs)
_file.write(_labs)
self._tctr += 1
for _item in value:
if _item is None:
continue
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '{tabs}</array>\n'.format(tabs=_tabs)
_file.write(_labs)
def _append_dict(self, value, _file):
"""Call this function to write dict contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_labs = '{tabs}<dict>\n'.format(tabs=_tabs)
_file.write(_labs)
self._tctr += 1
for (_item, _text) in value.items():
if _text is None:
continue
_tabs = '\t' * self._tctr
_keys = '{tabs}<key>{item}</key>\n'.format(tabs=_tabs, item=_item)
_file.write(_keys)
_text = self.object_hook(_text)
_type = type(_text).__name__
_MAGIC_TYPES[_type](self, _text, _file)
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '{tabs}</dict>\n'.format(tabs=_tabs)
_file.write(_labs)
def _append_string(self, value, _file):
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<string>{text}</string>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_data(self, value, _file):
"""Call this function to write data contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
_tabs = '\t' * self._tctr
_text = base64.b64encode(value).decode() # value.hex() # str(value)[2:-1]
_labs = '{tabs}<data>{text}</data>\n'.format(tabs=_tabs, text=_text)
# _labs = '{tabs}<data>\n'.format(tabs=_tabs)
# _list = []
# for _item in textwrap.wrap(value.hex(), 32):
# _text = ' '.join(textwrap.wrap(_item, 2))
# _item = '{tabs}\t{text}'.format(tabs=_tabs, text=_text)
# _list.append(_item)
# _labs += '\n'.join(_list)
# _data = [H for H in iter(
# functools.partial(io.StringIO(value.hex()).read, 2), '')
# ] # to split bytes string into length-2 hex string list
# _labs += '\n{tabs}</data>\n'.format(tabs=_tabs)
_file.write(_labs)
def _append_integer(self, value, _file):
"""Call this function to write integer contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<integer>{text}</integer>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_real(self, value, _file):
"""Call this function to write real contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<real>{text}</real>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_bool(self, value, _file):
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = '<true/>' if value else '<false/>'
_labs = '{tabs}{text}\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/plist.py
|
PLIST._append_integer
|
python
|
def _append_integer(self, value, _file):
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<integer>{text}</integer>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
|
Call this function to write integer contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/plist.py#L277-L288
| null |
class PLIST(XML):
"""Dump Apple property list (PLIST) format file.
Usage:
>>> dumper = PLIST(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'plist'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
value ::= array | dict | string | data
| date | integer | real | bool
array ::= "<array>" value* "</array>"
dict ::= "<dict>" ("<key>" str "</key>" value)* "</dict>"
string ::= "<string>" str "</string>"
data ::= "<data>" bytes "</data>"
date ::= "<date>" datetime "</date>"
integer ::= "<integer>" int "</integer>"
real ::= "<real>" float "</real>"
bool ::= "<true/>" | "<false/>"
"""
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'plist'
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # dict
datetime.date, # date
int, # integer
float, # real
bytes, bytearray, memoryview, # data
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_hsrt = _HEADER_START
_hend = _HEADER_END
##########################################################################
# Utilities.
##########################################################################
def _append_value(self, value, _file, _name):
"""Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
"""
_tabs = '\t' * self._tctr
_keys = '{tabs}<key>{name}</key>\n'.format(tabs=_tabs, name=_name)
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._append_dict(value, _file)
def _append_array(self, value, _file):
"""Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_labs = '{tabs}<array>\n'.format(tabs=_tabs)
_file.write(_labs)
self._tctr += 1
for _item in value:
if _item is None:
continue
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '{tabs}</array>\n'.format(tabs=_tabs)
_file.write(_labs)
def _append_dict(self, value, _file):
"""Call this function to write dict contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_labs = '{tabs}<dict>\n'.format(tabs=_tabs)
_file.write(_labs)
self._tctr += 1
for (_item, _text) in value.items():
if _text is None:
continue
_tabs = '\t' * self._tctr
_keys = '{tabs}<key>{item}</key>\n'.format(tabs=_tabs, item=_item)
_file.write(_keys)
_text = self.object_hook(_text)
_type = type(_text).__name__
_MAGIC_TYPES[_type](self, _text, _file)
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '{tabs}</dict>\n'.format(tabs=_tabs)
_file.write(_labs)
def _append_string(self, value, _file):
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<string>{text}</string>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_data(self, value, _file):
"""Call this function to write data contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
_tabs = '\t' * self._tctr
_text = base64.b64encode(value).decode() # value.hex() # str(value)[2:-1]
_labs = '{tabs}<data>{text}</data>\n'.format(tabs=_tabs, text=_text)
# _labs = '{tabs}<data>\n'.format(tabs=_tabs)
# _list = []
# for _item in textwrap.wrap(value.hex(), 32):
# _text = ' '.join(textwrap.wrap(_item, 2))
# _item = '{tabs}\t{text}'.format(tabs=_tabs, text=_text)
# _list.append(_item)
# _labs += '\n'.join(_list)
# _data = [H for H in iter(
# functools.partial(io.StringIO(value.hex()).read, 2), '')
# ] # to split bytes string into length-2 hex string list
# _labs += '\n{tabs}</data>\n'.format(tabs=_tabs)
_file.write(_labs)
def _append_date(self, value, _file):
"""Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
_labs = '{tabs}<date>{text}</date>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_real(self, value, _file):
"""Call this function to write real contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = value
_labs = '{tabs}<real>{text}</real>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
def _append_bool(self, value, _file):
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_tabs = '\t' * self._tctr
_text = '<true/>' if value else '<false/>'
_labs = '{tabs}{text}\n'.format(tabs=_tabs, text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/dumper.py
|
Dumper._dump_header
|
python
|
def _dump_header(self):
with open(self._file, 'w') as _file:
_file.write(self._hsrt)
self._sptr = _file.tell()
_file.write(self._hend)
|
Initially dump file heads and tails.
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/dumper.py#L116-L121
| null |
class Dumper(object): # pylint:disable= metaclass-assignment,useless-object-inheritance
"""Abstract base class of all dumpers.
Usage:
>>> dumper = Dumper(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, file format of current dumper
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hsrt - str, _HEADER_START
* _hend - str, _HEADER_END
"""
__metaclass__ = ABCMeta
##########################################################################
# Attributes.
##########################################################################
_sptr = os.SEEK_SET # seek pointer
_tctr = 1 # counter for tab level
_hsrt = ''
_hend = ''
##########################################################################
# Properties.
##########################################################################
# file format of current dumper
@abstractproperty
def kind(self):
"""File format of current dumper."""
pass # pylint: disable=unnecessary-pass
##########################################################################
# Data models.
##########################################################################
# Not hashable
__hash__ = None
@deprecation
def __new__(cls, fname, **kwargs): # pylint: disable=unused-argument
self = super().__new__(cls)
self.object_hook = \
kwargs.get('object_hook', cls.object_hook)
return self
def __init__(self, fname, **kwargs): # pylint: disable=unused-argument
if not os.path.isfile(fname):
open(fname, 'w+').close()
self._file = fname # dump file name
self._dump_header() # initialise output file
def __call__(self, value, name=None):
with open(self._file, 'r+') as _file:
self._append_value(value, _file, name)
self._sptr = _file.tell()
_file.write(self._hend)
##########################################################################
# Utilities.
##########################################################################
@classmethod
def object_hook(cls, obj):
"""Check content type for function call."""
if isinstance(obj, cls):
return obj
return repr(obj)
@abstractmethod
def _append_value(self, value, _file, _name):
"""Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
"""
pass # pylint: disable=unnecessary-pass
|
JarryShaw/DictDumper
|
src/tree.py
|
Tree._append_value
|
python
|
def _append_value(self, value, _file, _name):
if self._flag:
_keys = _name + '\n'
else:
_keys = '\n' + _name + '\n'
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._bctr = collections.defaultdict(int) # blank branch counter dict
self._append_branch(value, _file)
|
Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/tree.py#L169-L186
| null |
class Tree(Dumper):
"""Dump a tree-view text (TXT) format file.
Usage:
>>> dumper = Tree(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'plist'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
* _bctr - dict, blank branch counter dict
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
value ::= branch | array | string | number | bool | N/A
string
|-- string
| |-- string -> value
| |-- string
| | |-- string -> value
| | |-- string -> value
| |-- string -> value
| |-- string -> value
| |-- string -> value
| |-- string -> value
|-- string -> value, value, value
|-- string -> True
|-- string -> False
|-- string -> N/A
|-- string -> value
|-- string -> value
"""
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # branch
type(None), # none
datetime.date, # date
int, float, complex, # number
bytes, bytearray, memoryview, # bytes
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_bctr = None
_tctr = -1
_hsrt = _HEADER_START
_hend = _HEADER_END
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'txt'
##########################################################################
# Data models.
##########################################################################
def __init__(self, fname, **kwargs):
self._flag = kwargs.pop('quiet', False)
if self._flag:
self._hsrt = ''
super().__init__(fname, **kwargs)
self._hsrt = _HEADER_START
##########################################################################
# Utilities.
##########################################################################
def _append_array(self, value, _file):
"""Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
if not value:
self._append_none(None, _file)
return
_bptr = ''
_tabs = ''
_tlen = len(value) - 1
if _tlen:
_bptr = ' |-->'
for _ in range(self._tctr + 1):
_tabs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
else:
_tabs = ''
for (_nctr, _item) in enumerate(value):
_text = '{tabs}{bptr}'.format(tabs=_tabs, bptr=_bptr)
_file.write(_text)
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
_suff = '\n' if _nctr < _tlen else ''
_file.write(_suff)
def _append_branch(self, value, _file):
"""Call this function to write branch contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
if not value:
return
# return self._append_none(None, _file)
self._tctr += 1
_vlen = len(value)
for (_vctr, (_item, _text)) in enumerate(value.items()):
_text = self.object_hook(_text)
_type = type(_text).__name__
flag_dict = (_type == 'dict')
flag_list = (_type == 'list' and (len(_text) > 1 or (len(_text) == 1 and type(_text[0]).__name__ == 'dict'))) # noqa pylint: disable=line-too-long
flag_tuple = (_type == 'tuple' and (len(_text) > 1 or (len(_text) == 1 and type(_text[0]).__name__ == 'dict'))) # noqa pylint: disable=line-too-long
flag_bytes = (_type == 'bytes' and len(_text) > 16)
if any((flag_dict, flag_list, flag_tuple, flag_bytes)):
_pref = '\n'
else:
_pref = ' ->'
_labs = ''
for _ in range(self._tctr):
_labs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
_keys = '{labs} |-- {item}{pref}'.format(labs=_labs, item=_item, pref=_pref)
_file.write(_keys)
if _vctr == _vlen - 1:
self._bctr[self._tctr] = 1
_MAGIC_TYPES[_type](self, _text, _file)
_suff = '' if _type == 'dict' else '\n'
_file.write(_suff)
self._bctr[self._tctr] = 0
self._tctr -= 1
def _append_string(self, value, _file):
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
if not value:
self._append_none(None, _file)
return
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_bytes(self, value, _file):
"""Call this function to write bytes contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
if not value:
self._append_none(None, _file)
return
if len(value) > 16:
_tabs = ''
for _ in range(self._tctr + 1):
_tabs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
_list = []
for (_ictr, _item) in enumerate(textwrap.wrap(value.hex(), 32)):
_bptr = ' ' if _ictr else ' |--> '
_text = ' '.join(textwrap.wrap(_item, 2))
_item = '{tabs}{bptr}{text}'.format(tabs=_tabs, bptr=_bptr, text=_text)
_list.append(_item)
_labs = '\n'.join(_list)
else:
_text = ' '.join(textwrap.wrap(value.hex(), 2))
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_date(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value.strftime('%Y-%m-%d %H:%M:%S.%f')
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_number(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write number contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_bool(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'True' if value else 'False'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_none(self, value, _file): # pylint: disable=unused-argument,no-self-use
"""Call this function to write none contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'NIL'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/tree.py
|
Tree._append_array
|
python
|
def _append_array(self, value, _file):
if not value:
self._append_none(None, _file)
return
_bptr = ''
_tabs = ''
_tlen = len(value) - 1
if _tlen:
_bptr = ' |-->'
for _ in range(self._tctr + 1):
_tabs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
else:
_tabs = ''
for (_nctr, _item) in enumerate(value):
_text = '{tabs}{bptr}'.format(tabs=_tabs, bptr=_bptr)
_file.write(_text)
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
_suff = '\n' if _nctr < _tlen else ''
_file.write(_suff)
|
Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/tree.py#L188-L219
| null |
class Tree(Dumper):
"""Dump a tree-view text (TXT) format file.
Usage:
>>> dumper = Tree(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'plist'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
* _bctr - dict, blank branch counter dict
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
value ::= branch | array | string | number | bool | N/A
string
|-- string
| |-- string -> value
| |-- string
| | |-- string -> value
| | |-- string -> value
| |-- string -> value
| |-- string -> value
| |-- string -> value
| |-- string -> value
|-- string -> value, value, value
|-- string -> True
|-- string -> False
|-- string -> N/A
|-- string -> value
|-- string -> value
"""
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # branch
type(None), # none
datetime.date, # date
int, float, complex, # number
bytes, bytearray, memoryview, # bytes
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_bctr = None
_tctr = -1
_hsrt = _HEADER_START
_hend = _HEADER_END
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'txt'
##########################################################################
# Data models.
##########################################################################
def __init__(self, fname, **kwargs):
self._flag = kwargs.pop('quiet', False)
if self._flag:
self._hsrt = ''
super().__init__(fname, **kwargs)
self._hsrt = _HEADER_START
##########################################################################
# Utilities.
##########################################################################
def _append_value(self, value, _file, _name):
"""Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
"""
if self._flag:
_keys = _name + '\n'
else:
_keys = '\n' + _name + '\n'
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._bctr = collections.defaultdict(int) # blank branch counter dict
self._append_branch(value, _file)
def _append_branch(self, value, _file):
"""Call this function to write branch contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
if not value:
return
# return self._append_none(None, _file)
self._tctr += 1
_vlen = len(value)
for (_vctr, (_item, _text)) in enumerate(value.items()):
_text = self.object_hook(_text)
_type = type(_text).__name__
flag_dict = (_type == 'dict')
flag_list = (_type == 'list' and (len(_text) > 1 or (len(_text) == 1 and type(_text[0]).__name__ == 'dict'))) # noqa pylint: disable=line-too-long
flag_tuple = (_type == 'tuple' and (len(_text) > 1 or (len(_text) == 1 and type(_text[0]).__name__ == 'dict'))) # noqa pylint: disable=line-too-long
flag_bytes = (_type == 'bytes' and len(_text) > 16)
if any((flag_dict, flag_list, flag_tuple, flag_bytes)):
_pref = '\n'
else:
_pref = ' ->'
_labs = ''
for _ in range(self._tctr):
_labs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
_keys = '{labs} |-- {item}{pref}'.format(labs=_labs, item=_item, pref=_pref)
_file.write(_keys)
if _vctr == _vlen - 1:
self._bctr[self._tctr] = 1
_MAGIC_TYPES[_type](self, _text, _file)
_suff = '' if _type == 'dict' else '\n'
_file.write(_suff)
self._bctr[self._tctr] = 0
self._tctr -= 1
def _append_string(self, value, _file):
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
if not value:
self._append_none(None, _file)
return
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_bytes(self, value, _file):
"""Call this function to write bytes contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
if not value:
self._append_none(None, _file)
return
if len(value) > 16:
_tabs = ''
for _ in range(self._tctr + 1):
_tabs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
_list = []
for (_ictr, _item) in enumerate(textwrap.wrap(value.hex(), 32)):
_bptr = ' ' if _ictr else ' |--> '
_text = ' '.join(textwrap.wrap(_item, 2))
_item = '{tabs}{bptr}{text}'.format(tabs=_tabs, bptr=_bptr, text=_text)
_list.append(_item)
_labs = '\n'.join(_list)
else:
_text = ' '.join(textwrap.wrap(value.hex(), 2))
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_date(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value.strftime('%Y-%m-%d %H:%M:%S.%f')
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_number(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write number contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_bool(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'True' if value else 'False'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_none(self, value, _file): # pylint: disable=unused-argument,no-self-use
"""Call this function to write none contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'NIL'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/tree.py
|
Tree._append_branch
|
python
|
def _append_branch(self, value, _file):
if not value:
return
# return self._append_none(None, _file)
self._tctr += 1
_vlen = len(value)
for (_vctr, (_item, _text)) in enumerate(value.items()):
_text = self.object_hook(_text)
_type = type(_text).__name__
flag_dict = (_type == 'dict')
flag_list = (_type == 'list' and (len(_text) > 1 or (len(_text) == 1 and type(_text[0]).__name__ == 'dict'))) # noqa pylint: disable=line-too-long
flag_tuple = (_type == 'tuple' and (len(_text) > 1 or (len(_text) == 1 and type(_text[0]).__name__ == 'dict'))) # noqa pylint: disable=line-too-long
flag_bytes = (_type == 'bytes' and len(_text) > 16)
if any((flag_dict, flag_list, flag_tuple, flag_bytes)):
_pref = '\n'
else:
_pref = ' ->'
_labs = ''
for _ in range(self._tctr):
_labs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
_keys = '{labs} |-- {item}{pref}'.format(labs=_labs, item=_item, pref=_pref)
_file.write(_keys)
if _vctr == _vlen - 1:
self._bctr[self._tctr] = 1
_MAGIC_TYPES[_type](self, _text, _file)
_suff = '' if _type == 'dict' else '\n'
_file.write(_suff)
self._bctr[self._tctr] = 0
self._tctr -= 1
|
Call this function to write branch contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/tree.py#L221-L264
| null |
class Tree(Dumper):
"""Dump a tree-view text (TXT) format file.
Usage:
>>> dumper = Tree(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'plist'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
* _bctr - dict, blank branch counter dict
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
value ::= branch | array | string | number | bool | N/A
string
|-- string
| |-- string -> value
| |-- string
| | |-- string -> value
| | |-- string -> value
| |-- string -> value
| |-- string -> value
| |-- string -> value
| |-- string -> value
|-- string -> value, value, value
|-- string -> True
|-- string -> False
|-- string -> N/A
|-- string -> value
|-- string -> value
"""
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # branch
type(None), # none
datetime.date, # date
int, float, complex, # number
bytes, bytearray, memoryview, # bytes
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_bctr = None
_tctr = -1
_hsrt = _HEADER_START
_hend = _HEADER_END
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'txt'
##########################################################################
# Data models.
##########################################################################
def __init__(self, fname, **kwargs):
self._flag = kwargs.pop('quiet', False)
if self._flag:
self._hsrt = ''
super().__init__(fname, **kwargs)
self._hsrt = _HEADER_START
##########################################################################
# Utilities.
##########################################################################
def _append_value(self, value, _file, _name):
"""Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
"""
if self._flag:
_keys = _name + '\n'
else:
_keys = '\n' + _name + '\n'
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._bctr = collections.defaultdict(int) # blank branch counter dict
self._append_branch(value, _file)
def _append_array(self, value, _file):
"""Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
if not value:
self._append_none(None, _file)
return
_bptr = ''
_tabs = ''
_tlen = len(value) - 1
if _tlen:
_bptr = ' |-->'
for _ in range(self._tctr + 1):
_tabs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
else:
_tabs = ''
for (_nctr, _item) in enumerate(value):
_text = '{tabs}{bptr}'.format(tabs=_tabs, bptr=_bptr)
_file.write(_text)
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
_suff = '\n' if _nctr < _tlen else ''
_file.write(_suff)
def _append_string(self, value, _file):
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
if not value:
self._append_none(None, _file)
return
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_bytes(self, value, _file):
"""Call this function to write bytes contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
if not value:
self._append_none(None, _file)
return
if len(value) > 16:
_tabs = ''
for _ in range(self._tctr + 1):
_tabs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
_list = []
for (_ictr, _item) in enumerate(textwrap.wrap(value.hex(), 32)):
_bptr = ' ' if _ictr else ' |--> '
_text = ' '.join(textwrap.wrap(_item, 2))
_item = '{tabs}{bptr}{text}'.format(tabs=_tabs, bptr=_bptr, text=_text)
_list.append(_item)
_labs = '\n'.join(_list)
else:
_text = ' '.join(textwrap.wrap(value.hex(), 2))
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_date(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value.strftime('%Y-%m-%d %H:%M:%S.%f')
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_number(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write number contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_bool(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'True' if value else 'False'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_none(self, value, _file): # pylint: disable=unused-argument,no-self-use
"""Call this function to write none contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'NIL'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/tree.py
|
Tree._append_bytes
|
python
|
def _append_bytes(self, value, _file):
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
if not value:
self._append_none(None, _file)
return
if len(value) > 16:
_tabs = ''
for _ in range(self._tctr + 1):
_tabs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
_list = []
for (_ictr, _item) in enumerate(textwrap.wrap(value.hex(), 32)):
_bptr = ' ' if _ictr else ' |--> '
_text = ' '.join(textwrap.wrap(_item, 2))
_item = '{tabs}{bptr}{text}'.format(tabs=_tabs, bptr=_bptr, text=_text)
_list.append(_item)
_labs = '\n'.join(_list)
else:
_text = ' '.join(textwrap.wrap(value.hex(), 2))
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
|
Call this function to write bytes contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/tree.py#L282-L311
| null |
class Tree(Dumper):
"""Dump a tree-view text (TXT) format file.
Usage:
>>> dumper = Tree(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'plist'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
* _bctr - dict, blank branch counter dict
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
value ::= branch | array | string | number | bool | N/A
string
|-- string
| |-- string -> value
| |-- string
| | |-- string -> value
| | |-- string -> value
| |-- string -> value
| |-- string -> value
| |-- string -> value
| |-- string -> value
|-- string -> value, value, value
|-- string -> True
|-- string -> False
|-- string -> N/A
|-- string -> value
|-- string -> value
"""
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # branch
type(None), # none
datetime.date, # date
int, float, complex, # number
bytes, bytearray, memoryview, # bytes
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_bctr = None
_tctr = -1
_hsrt = _HEADER_START
_hend = _HEADER_END
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'txt'
##########################################################################
# Data models.
##########################################################################
def __init__(self, fname, **kwargs):
self._flag = kwargs.pop('quiet', False)
if self._flag:
self._hsrt = ''
super().__init__(fname, **kwargs)
self._hsrt = _HEADER_START
##########################################################################
# Utilities.
##########################################################################
def _append_value(self, value, _file, _name):
"""Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
"""
if self._flag:
_keys = _name + '\n'
else:
_keys = '\n' + _name + '\n'
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._bctr = collections.defaultdict(int) # blank branch counter dict
self._append_branch(value, _file)
def _append_array(self, value, _file):
"""Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
if not value:
self._append_none(None, _file)
return
_bptr = ''
_tabs = ''
_tlen = len(value) - 1
if _tlen:
_bptr = ' |-->'
for _ in range(self._tctr + 1):
_tabs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
else:
_tabs = ''
for (_nctr, _item) in enumerate(value):
_text = '{tabs}{bptr}'.format(tabs=_tabs, bptr=_bptr)
_file.write(_text)
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
_suff = '\n' if _nctr < _tlen else ''
_file.write(_suff)
def _append_branch(self, value, _file):
"""Call this function to write branch contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
if not value:
return
# return self._append_none(None, _file)
self._tctr += 1
_vlen = len(value)
for (_vctr, (_item, _text)) in enumerate(value.items()):
_text = self.object_hook(_text)
_type = type(_text).__name__
flag_dict = (_type == 'dict')
flag_list = (_type == 'list' and (len(_text) > 1 or (len(_text) == 1 and type(_text[0]).__name__ == 'dict'))) # noqa pylint: disable=line-too-long
flag_tuple = (_type == 'tuple' and (len(_text) > 1 or (len(_text) == 1 and type(_text[0]).__name__ == 'dict'))) # noqa pylint: disable=line-too-long
flag_bytes = (_type == 'bytes' and len(_text) > 16)
if any((flag_dict, flag_list, flag_tuple, flag_bytes)):
_pref = '\n'
else:
_pref = ' ->'
_labs = ''
for _ in range(self._tctr):
_labs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
_keys = '{labs} |-- {item}{pref}'.format(labs=_labs, item=_item, pref=_pref)
_file.write(_keys)
if _vctr == _vlen - 1:
self._bctr[self._tctr] = 1
_MAGIC_TYPES[_type](self, _text, _file)
_suff = '' if _type == 'dict' else '\n'
_file.write(_suff)
self._bctr[self._tctr] = 0
self._tctr -= 1
def _append_string(self, value, _file):
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
if not value:
self._append_none(None, _file)
return
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_date(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value.strftime('%Y-%m-%d %H:%M:%S.%f')
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_number(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write number contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_bool(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'True' if value else 'False'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_none(self, value, _file): # pylint: disable=unused-argument,no-self-use
"""Call this function to write none contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'NIL'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/tree.py
|
Tree._append_number
|
python
|
def _append_number(self, value, _file): # pylint: disable=no-self-use
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
|
Call this function to write number contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/tree.py#L325-L335
| null |
class Tree(Dumper):
"""Dump a tree-view text (TXT) format file.
Usage:
>>> dumper = Tree(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'plist'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
* _bctr - dict, blank branch counter dict
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
value ::= branch | array | string | number | bool | N/A
string
|-- string
| |-- string -> value
| |-- string
| | |-- string -> value
| | |-- string -> value
| |-- string -> value
| |-- string -> value
| |-- string -> value
| |-- string -> value
|-- string -> value, value, value
|-- string -> True
|-- string -> False
|-- string -> N/A
|-- string -> value
|-- string -> value
"""
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # branch
type(None), # none
datetime.date, # date
int, float, complex, # number
bytes, bytearray, memoryview, # bytes
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_bctr = None
_tctr = -1
_hsrt = _HEADER_START
_hend = _HEADER_END
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'txt'
##########################################################################
# Data models.
##########################################################################
def __init__(self, fname, **kwargs):
self._flag = kwargs.pop('quiet', False)
if self._flag:
self._hsrt = ''
super().__init__(fname, **kwargs)
self._hsrt = _HEADER_START
##########################################################################
# Utilities.
##########################################################################
def _append_value(self, value, _file, _name):
"""Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
"""
if self._flag:
_keys = _name + '\n'
else:
_keys = '\n' + _name + '\n'
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._bctr = collections.defaultdict(int) # blank branch counter dict
self._append_branch(value, _file)
def _append_array(self, value, _file):
"""Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
if not value:
self._append_none(None, _file)
return
_bptr = ''
_tabs = ''
_tlen = len(value) - 1
if _tlen:
_bptr = ' |-->'
for _ in range(self._tctr + 1):
_tabs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
else:
_tabs = ''
for (_nctr, _item) in enumerate(value):
_text = '{tabs}{bptr}'.format(tabs=_tabs, bptr=_bptr)
_file.write(_text)
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
_suff = '\n' if _nctr < _tlen else ''
_file.write(_suff)
def _append_branch(self, value, _file):
"""Call this function to write branch contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
if not value:
return
# return self._append_none(None, _file)
self._tctr += 1
_vlen = len(value)
for (_vctr, (_item, _text)) in enumerate(value.items()):
_text = self.object_hook(_text)
_type = type(_text).__name__
flag_dict = (_type == 'dict')
flag_list = (_type == 'list' and (len(_text) > 1 or (len(_text) == 1 and type(_text[0]).__name__ == 'dict'))) # noqa pylint: disable=line-too-long
flag_tuple = (_type == 'tuple' and (len(_text) > 1 or (len(_text) == 1 and type(_text[0]).__name__ == 'dict'))) # noqa pylint: disable=line-too-long
flag_bytes = (_type == 'bytes' and len(_text) > 16)
if any((flag_dict, flag_list, flag_tuple, flag_bytes)):
_pref = '\n'
else:
_pref = ' ->'
_labs = ''
for _ in range(self._tctr):
_labs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
_keys = '{labs} |-- {item}{pref}'.format(labs=_labs, item=_item, pref=_pref)
_file.write(_keys)
if _vctr == _vlen - 1:
self._bctr[self._tctr] = 1
_MAGIC_TYPES[_type](self, _text, _file)
_suff = '' if _type == 'dict' else '\n'
_file.write(_suff)
self._bctr[self._tctr] = 0
self._tctr -= 1
def _append_string(self, value, _file):
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
if not value:
self._append_none(None, _file)
return
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_bytes(self, value, _file):
"""Call this function to write bytes contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
if not value:
self._append_none(None, _file)
return
if len(value) > 16:
_tabs = ''
for _ in range(self._tctr + 1):
_tabs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
_list = []
for (_ictr, _item) in enumerate(textwrap.wrap(value.hex(), 32)):
_bptr = ' ' if _ictr else ' |--> '
_text = ' '.join(textwrap.wrap(_item, 2))
_item = '{tabs}{bptr}{text}'.format(tabs=_tabs, bptr=_bptr, text=_text)
_list.append(_item)
_labs = '\n'.join(_list)
else:
_text = ' '.join(textwrap.wrap(value.hex(), 2))
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_date(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value.strftime('%Y-%m-%d %H:%M:%S.%f')
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_bool(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'True' if value else 'False'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_none(self, value, _file): # pylint: disable=unused-argument,no-self-use
"""Call this function to write none contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'NIL'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/json.py
|
JSON._append_value
|
python
|
def _append_value(self, value, _file, _name):
_tabs = '\t' * self._tctr
_cmma = ',\n' if self._vctr[self._tctr] else ''
_keys = '{cmma}{tabs}"{name}" :'.format(cmma=_cmma, tabs=_tabs, name=_name)
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._vctr[self._tctr] += 1
self._append_object(value, _file)
|
Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/json.py#L143-L160
| null |
class JSON(Dumper):
"""Dump JavaScript object notation (JSON) format file.
Usage:
>>> dumper = JSON(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'json'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
* _vctr - dict, value counter dict
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
object ::= "{}" | ("{" members "}")
members ::= pair | (pair "," members)
pair ::= string ":" value
array ::= "[]" | ("[" elements "]")
elements ::= value | (value "," elements)
value ::= string | number | object
| array | true | false | null
"""
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'json'
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # object
datetime.date, # date
int, float, complex, # number
type(None), # null
bytes, bytearray, memoryview, # bytes
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_hsrt = _HEADER_START
_hend = _HEADER_END
_vctr = collections.defaultdict(int) # value counter dict
##########################################################################
# Utilities.
##########################################################################
##########################################################################
# Functions.
##########################################################################
def _append_array(self, value, _file):
"""Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_labs = ' ['
_file.write(_labs)
self._tctr += 1
for _item in value:
_cmma = ',' if self._vctr[self._tctr] else ''
_file.write(_cmma)
self._vctr[self._tctr] += 1
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
self._vctr[self._tctr] = 0
self._tctr -= 1
_labs = ' ]'
_file.write(_labs)
def _append_object(self, value, _file):
"""Call this function to write object contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_labs = ' {'
_file.write(_labs)
self._tctr += 1
for (_item, _text) in value.items():
_tabs = '\t' * self._tctr
_cmma = ',' if self._vctr[self._tctr] else ''
_keys = '{cmma}\n{tabs}"{item}" :'.format(cmma=_cmma, tabs=_tabs, item=_item)
_file.write(_keys)
self._vctr[self._tctr] += 1
_text = self.object_hook(_text)
_type = type(_text).__name__
_MAGIC_TYPES[_type](self, _text, _file)
self._vctr[self._tctr] = 0
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '\n{tabs}{}'.format('}', tabs=_tabs)
_file.write(_labs)
def _append_string(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = str(value).replace('"', '\\"')
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
def _append_bytes(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bytes contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
_text = ' '.join(textwrap.wrap(value.hex(), 2))
# _data = [H for H in iter(
# functools.partial(io.StringIO(value.hex()).read, 2), '')
# ] # to split bytes string into length-2 hex string list
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
def _append_date(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
def _append_number(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write number contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_bool(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'true' if value else 'false'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_null(self, value, _file): # pylint: disable=unused-argument,no-self-use
"""Call this function to write null contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'null'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/json.py
|
JSON._append_array
|
python
|
def _append_array(self, value, _file):
_labs = ' ['
_file.write(_labs)
self._tctr += 1
for _item in value:
_cmma = ',' if self._vctr[self._tctr] else ''
_file.write(_cmma)
self._vctr[self._tctr] += 1
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
self._vctr[self._tctr] = 0
self._tctr -= 1
_labs = ' ]'
_file.write(_labs)
|
Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/json.py#L166-L193
| null |
class JSON(Dumper):
"""Dump JavaScript object notation (JSON) format file.
Usage:
>>> dumper = JSON(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'json'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
* _vctr - dict, value counter dict
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
object ::= "{}" | ("{" members "}")
members ::= pair | (pair "," members)
pair ::= string ":" value
array ::= "[]" | ("[" elements "]")
elements ::= value | (value "," elements)
value ::= string | number | object
| array | true | false | null
"""
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'json'
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # object
datetime.date, # date
int, float, complex, # number
type(None), # null
bytes, bytearray, memoryview, # bytes
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_hsrt = _HEADER_START
_hend = _HEADER_END
_vctr = collections.defaultdict(int) # value counter dict
##########################################################################
# Utilities.
##########################################################################
def _append_value(self, value, _file, _name):
"""Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
"""
_tabs = '\t' * self._tctr
_cmma = ',\n' if self._vctr[self._tctr] else ''
_keys = '{cmma}{tabs}"{name}" :'.format(cmma=_cmma, tabs=_tabs, name=_name)
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._vctr[self._tctr] += 1
self._append_object(value, _file)
##########################################################################
# Functions.
##########################################################################
def _append_object(self, value, _file):
"""Call this function to write object contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_labs = ' {'
_file.write(_labs)
self._tctr += 1
for (_item, _text) in value.items():
_tabs = '\t' * self._tctr
_cmma = ',' if self._vctr[self._tctr] else ''
_keys = '{cmma}\n{tabs}"{item}" :'.format(cmma=_cmma, tabs=_tabs, item=_item)
_file.write(_keys)
self._vctr[self._tctr] += 1
_text = self.object_hook(_text)
_type = type(_text).__name__
_MAGIC_TYPES[_type](self, _text, _file)
self._vctr[self._tctr] = 0
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '\n{tabs}{}'.format('}', tabs=_tabs)
_file.write(_labs)
def _append_string(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = str(value).replace('"', '\\"')
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
def _append_bytes(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bytes contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
_text = ' '.join(textwrap.wrap(value.hex(), 2))
# _data = [H for H in iter(
# functools.partial(io.StringIO(value.hex()).read, 2), '')
# ] # to split bytes string into length-2 hex string list
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
def _append_date(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
def _append_number(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write number contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_bool(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'true' if value else 'false'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_null(self, value, _file): # pylint: disable=unused-argument,no-self-use
"""Call this function to write null contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'null'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/json.py
|
JSON._append_object
|
python
|
def _append_object(self, value, _file):
_labs = ' {'
_file.write(_labs)
self._tctr += 1
for (_item, _text) in value.items():
_tabs = '\t' * self._tctr
_cmma = ',' if self._vctr[self._tctr] else ''
_keys = '{cmma}\n{tabs}"{item}" :'.format(cmma=_cmma, tabs=_tabs, item=_item)
_file.write(_keys)
self._vctr[self._tctr] += 1
_text = self.object_hook(_text)
_type = type(_text).__name__
_MAGIC_TYPES[_type](self, _text, _file)
self._vctr[self._tctr] = 0
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '\n{tabs}{}'.format('}', tabs=_tabs)
_file.write(_labs)
|
Call this function to write object contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/json.py#L195-L223
| null |
class JSON(Dumper):
"""Dump JavaScript object notation (JSON) format file.
Usage:
>>> dumper = JSON(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'json'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
* _vctr - dict, value counter dict
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
object ::= "{}" | ("{" members "}")
members ::= pair | (pair "," members)
pair ::= string ":" value
array ::= "[]" | ("[" elements "]")
elements ::= value | (value "," elements)
value ::= string | number | object
| array | true | false | null
"""
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'json'
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # object
datetime.date, # date
int, float, complex, # number
type(None), # null
bytes, bytearray, memoryview, # bytes
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_hsrt = _HEADER_START
_hend = _HEADER_END
_vctr = collections.defaultdict(int) # value counter dict
##########################################################################
# Utilities.
##########################################################################
def _append_value(self, value, _file, _name):
"""Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
"""
_tabs = '\t' * self._tctr
_cmma = ',\n' if self._vctr[self._tctr] else ''
_keys = '{cmma}{tabs}"{name}" :'.format(cmma=_cmma, tabs=_tabs, name=_name)
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._vctr[self._tctr] += 1
self._append_object(value, _file)
##########################################################################
# Functions.
##########################################################################
def _append_array(self, value, _file):
"""Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_labs = ' ['
_file.write(_labs)
self._tctr += 1
for _item in value:
_cmma = ',' if self._vctr[self._tctr] else ''
_file.write(_cmma)
self._vctr[self._tctr] += 1
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
self._vctr[self._tctr] = 0
self._tctr -= 1
_labs = ' ]'
_file.write(_labs)
def _append_string(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = str(value).replace('"', '\\"')
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
def _append_bytes(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bytes contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
_text = ' '.join(textwrap.wrap(value.hex(), 2))
# _data = [H for H in iter(
# functools.partial(io.StringIO(value.hex()).read, 2), '')
# ] # to split bytes string into length-2 hex string list
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
def _append_date(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
def _append_number(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write number contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_bool(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'true' if value else 'false'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_null(self, value, _file): # pylint: disable=unused-argument,no-self-use
"""Call this function to write null contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'null'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/json.py
|
JSON._append_string
|
python
|
def _append_string(self, value, _file): # pylint: disable=no-self-use
_text = str(value).replace('"', '\\"')
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
|
Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/json.py#L225-L235
| null |
class JSON(Dumper):
"""Dump JavaScript object notation (JSON) format file.
Usage:
>>> dumper = JSON(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'json'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
* _vctr - dict, value counter dict
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
object ::= "{}" | ("{" members "}")
members ::= pair | (pair "," members)
pair ::= string ":" value
array ::= "[]" | ("[" elements "]")
elements ::= value | (value "," elements)
value ::= string | number | object
| array | true | false | null
"""
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'json'
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # object
datetime.date, # date
int, float, complex, # number
type(None), # null
bytes, bytearray, memoryview, # bytes
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_hsrt = _HEADER_START
_hend = _HEADER_END
_vctr = collections.defaultdict(int) # value counter dict
##########################################################################
# Utilities.
##########################################################################
def _append_value(self, value, _file, _name):
"""Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
"""
_tabs = '\t' * self._tctr
_cmma = ',\n' if self._vctr[self._tctr] else ''
_keys = '{cmma}{tabs}"{name}" :'.format(cmma=_cmma, tabs=_tabs, name=_name)
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._vctr[self._tctr] += 1
self._append_object(value, _file)
##########################################################################
# Functions.
##########################################################################
def _append_array(self, value, _file):
"""Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_labs = ' ['
_file.write(_labs)
self._tctr += 1
for _item in value:
_cmma = ',' if self._vctr[self._tctr] else ''
_file.write(_cmma)
self._vctr[self._tctr] += 1
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
self._vctr[self._tctr] = 0
self._tctr -= 1
_labs = ' ]'
_file.write(_labs)
def _append_object(self, value, _file):
"""Call this function to write object contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_labs = ' {'
_file.write(_labs)
self._tctr += 1
for (_item, _text) in value.items():
_tabs = '\t' * self._tctr
_cmma = ',' if self._vctr[self._tctr] else ''
_keys = '{cmma}\n{tabs}"{item}" :'.format(cmma=_cmma, tabs=_tabs, item=_item)
_file.write(_keys)
self._vctr[self._tctr] += 1
_text = self.object_hook(_text)
_type = type(_text).__name__
_MAGIC_TYPES[_type](self, _text, _file)
self._vctr[self._tctr] = 0
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '\n{tabs}{}'.format('}', tabs=_tabs)
_file.write(_labs)
def _append_bytes(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bytes contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
_text = ' '.join(textwrap.wrap(value.hex(), 2))
# _data = [H for H in iter(
# functools.partial(io.StringIO(value.hex()).read, 2), '')
# ] # to split bytes string into length-2 hex string list
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
def _append_date(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
def _append_number(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write number contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_bool(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'true' if value else 'false'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_null(self, value, _file): # pylint: disable=unused-argument,no-self-use
"""Call this function to write null contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'null'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/json.py
|
JSON._append_bytes
|
python
|
def _append_bytes(self, value, _file): # pylint: disable=no-self-use
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
_text = ' '.join(textwrap.wrap(value.hex(), 2))
# _data = [H for H in iter(
# functools.partial(io.StringIO(value.hex()).read, 2), '')
# ] # to split bytes string into length-2 hex string list
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
|
Call this function to write bytes contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/json.py#L237-L253
| null |
class JSON(Dumper):
"""Dump JavaScript object notation (JSON) format file.
Usage:
>>> dumper = JSON(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'json'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
* _vctr - dict, value counter dict
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
object ::= "{}" | ("{" members "}")
members ::= pair | (pair "," members)
pair ::= string ":" value
array ::= "[]" | ("[" elements "]")
elements ::= value | (value "," elements)
value ::= string | number | object
| array | true | false | null
"""
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'json'
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # object
datetime.date, # date
int, float, complex, # number
type(None), # null
bytes, bytearray, memoryview, # bytes
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_hsrt = _HEADER_START
_hend = _HEADER_END
_vctr = collections.defaultdict(int) # value counter dict
##########################################################################
# Utilities.
##########################################################################
def _append_value(self, value, _file, _name):
"""Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
"""
_tabs = '\t' * self._tctr
_cmma = ',\n' if self._vctr[self._tctr] else ''
_keys = '{cmma}{tabs}"{name}" :'.format(cmma=_cmma, tabs=_tabs, name=_name)
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._vctr[self._tctr] += 1
self._append_object(value, _file)
##########################################################################
# Functions.
##########################################################################
def _append_array(self, value, _file):
"""Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_labs = ' ['
_file.write(_labs)
self._tctr += 1
for _item in value:
_cmma = ',' if self._vctr[self._tctr] else ''
_file.write(_cmma)
self._vctr[self._tctr] += 1
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
self._vctr[self._tctr] = 0
self._tctr -= 1
_labs = ' ]'
_file.write(_labs)
def _append_object(self, value, _file):
"""Call this function to write object contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_labs = ' {'
_file.write(_labs)
self._tctr += 1
for (_item, _text) in value.items():
_tabs = '\t' * self._tctr
_cmma = ',' if self._vctr[self._tctr] else ''
_keys = '{cmma}\n{tabs}"{item}" :'.format(cmma=_cmma, tabs=_tabs, item=_item)
_file.write(_keys)
self._vctr[self._tctr] += 1
_text = self.object_hook(_text)
_type = type(_text).__name__
_MAGIC_TYPES[_type](self, _text, _file)
self._vctr[self._tctr] = 0
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '\n{tabs}{}'.format('}', tabs=_tabs)
_file.write(_labs)
def _append_string(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = str(value).replace('"', '\\"')
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
def _append_date(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
def _append_number(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write number contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_bool(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'true' if value else 'false'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_null(self, value, _file): # pylint: disable=unused-argument,no-self-use
"""Call this function to write null contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'null'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
|
JarryShaw/DictDumper
|
src/json.py
|
JSON._append_date
|
python
|
def _append_date(self, value, _file): # pylint: disable=no-self-use
_text = value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
|
Call this function to write date contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
|
train
|
https://github.com/JarryShaw/DictDumper/blob/430efcfdff18bb2421c3f27059ff94c93e621483/src/json.py#L255-L265
| null |
class JSON(Dumper):
"""Dump JavaScript object notation (JSON) format file.
Usage:
>>> dumper = JSON(file_name)
>>> dumper(content_dict_1, name=content_name_1)
>>> dumper(content_dict_2, name=content_name_2)
............
Properties:
* kind - str, return 'json'
Methods:
* object_hook - default/customised object hooks
Attributes:
* _file - FileIO, output file
* _sptr - int (file pointer), indicates start of appending point
* _tctr - int, tab level counter
* _hrst - str, _HEADER_START
* _hend - str, _HEADER_END
* _vctr - dict, value counter dict
Utilities:
* _dump_header - initially dump file heads and tails
* _append_value - call this function to write contents
Terminology:
object ::= "{}" | ("{" members "}")
members ::= pair | (pair "," members)
pair ::= string ":" value
array ::= "[]" | ("[" elements "]")
elements ::= value | (value "," elements)
value ::= string | number | object
| array | true | false | null
"""
##########################################################################
# Properties.
##########################################################################
@property
def kind(self):
"""File format of current dumper."""
return 'json'
##########################################################################
# Type codes.
##########################################################################
__type__ = (
str, # string
bool, # bool
dict, # object
datetime.date, # date
int, float, complex, # number
type(None), # null
bytes, bytearray, memoryview, # bytes
list, tuple, range, set, frozenset, # array
)
##########################################################################
# Attributes.
##########################################################################
_hsrt = _HEADER_START
_hend = _HEADER_END
_vctr = collections.defaultdict(int) # value counter dict
##########################################################################
# Utilities.
##########################################################################
def _append_value(self, value, _file, _name):
"""Call this function to write contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
* _name - str, name of current content dict
"""
_tabs = '\t' * self._tctr
_cmma = ',\n' if self._vctr[self._tctr] else ''
_keys = '{cmma}{tabs}"{name}" :'.format(cmma=_cmma, tabs=_tabs, name=_name)
_file.seek(self._sptr, os.SEEK_SET)
_file.write(_keys)
self._vctr[self._tctr] += 1
self._append_object(value, _file)
##########################################################################
# Functions.
##########################################################################
def _append_array(self, value, _file):
"""Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_labs = ' ['
_file.write(_labs)
self._tctr += 1
for _item in value:
_cmma = ',' if self._vctr[self._tctr] else ''
_file.write(_cmma)
self._vctr[self._tctr] += 1
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
self._vctr[self._tctr] = 0
self._tctr -= 1
_labs = ' ]'
_file.write(_labs)
def _append_object(self, value, _file):
"""Call this function to write object contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_labs = ' {'
_file.write(_labs)
self._tctr += 1
for (_item, _text) in value.items():
_tabs = '\t' * self._tctr
_cmma = ',' if self._vctr[self._tctr] else ''
_keys = '{cmma}\n{tabs}"{item}" :'.format(cmma=_cmma, tabs=_tabs, item=_item)
_file.write(_keys)
self._vctr[self._tctr] += 1
_text = self.object_hook(_text)
_type = type(_text).__name__
_MAGIC_TYPES[_type](self, _text, _file)
self._vctr[self._tctr] = 0
self._tctr -= 1
_tabs = '\t' * self._tctr
_labs = '\n{tabs}{}'.format('}', tabs=_tabs)
_file.write(_labs)
def _append_string(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = str(value).replace('"', '\\"')
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
def _append_bytes(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bytes contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
_text = ' '.join(textwrap.wrap(value.hex(), 2))
# _data = [H for H in iter(
# functools.partial(io.StringIO(value.hex()).read, 2), '')
# ] # to split bytes string into length-2 hex string list
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
def _append_number(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write number contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = value
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_bool(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bool contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'true' if value else 'false'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
def _append_null(self, value, _file): # pylint: disable=unused-argument,no-self-use
"""Call this function to write null contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = 'null'
_labs = ' {text}'.format(text=_text)
_file.write(_labs)
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/mixin.py
|
DynamoDBMixin._on_dynamodb_exception
|
python
|
def _on_dynamodb_exception(self, error):
if isinstance(error, exceptions.ConditionalCheckFailedException):
raise web.HTTPError(409, reason='Condition Check Failure')
elif isinstance(error, exceptions.NoCredentialsError):
if _no_creds_should_return_429():
raise web.HTTPError(429, reason='Instance Credentials Failure')
elif isinstance(error, (exceptions.ThroughputExceeded,
exceptions.ThrottlingException)):
raise web.HTTPError(429, reason='Too Many Requests')
if hasattr(self, 'logger'):
self.logger.error('DynamoDB Error: %s', error)
raise web.HTTPError(500, reason=str(error))
|
Dynamically handle DynamoDB exceptions, returning HTTP error
responses.
:param exceptions.DynamoDBException error:
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/mixin.py#L39-L56
| null |
class DynamoDBMixin(object):
"""The DynamoDBMixin is an opinionated :class:`~tornado.web.RequestHandler`
mixin class that
"""
def initialize(self):
super(DynamoDBMixin, self).initialize()
self.application.dynamodb.set_error_callback(
self._on_dynamodb_exception)
if influxdb:
self.application.dynamodb.set_instrumentation_callback(
self._record_dynamodb_execution)
@staticmethod
def _record_dynamodb_execution(measurements):
for row in measurements:
measurement = influxdb.Measurement(INFLUXDB_DATABASE,
INFLUXDB_MEASUREMENT)
measurement.set_timestamp(row.timestamp)
measurement.set_tag('action', row.action)
measurement.set_tag('table', row.table)
measurement.set_tag('attempt', row.attempt)
if row.error:
measurement.set_tag('error', row.error)
measurement.set_field('duration', row.duration)
influxdb.add_measurement(measurement)
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/utils.py
|
marshall
|
python
|
def marshall(values):
serialized = {}
for key in values:
serialized[key] = _marshall_value(values[key])
return serialized
|
Marshall a `dict` into something DynamoDB likes.
:param dict values: The values to marshall
:rtype: dict
:raises ValueError: if an unsupported type is encountered
Return the values in a nested dict structure that is required for
writing the values to DynamoDB.
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/utils.py#L40-L55
|
[
"def _marshall_value(value):\n \"\"\"\n Recursively transform `value` into an AttributeValue `dict`\n\n :param mixed value: The value to encode\n :rtype: dict\n :raises ValueError: for unsupported types\n\n Return the value as dict indicating the data type and transform or\n recursively process the value if required.\n\n \"\"\"\n if PYTHON3 and isinstance(value, bytes):\n return {'B': base64.b64encode(value).decode('ascii')}\n elif PYTHON3 and isinstance(value, str):\n return {'S': value}\n elif not PYTHON3 and isinstance(value, str):\n if is_binary(value):\n return {'B': base64.b64encode(value).decode('ascii')}\n return {'S': value}\n elif not PYTHON3 and isinstance(value, unicode):\n return {'S': value.encode('utf-8')}\n elif isinstance(value, dict):\n return {'M': marshall(value)}\n elif isinstance(value, bool):\n return {'BOOL': value}\n elif isinstance(value, (int, float)):\n return {'N': str(value)}\n elif isinstance(value, datetime.datetime):\n return {'S': value.isoformat()}\n elif isinstance(value, uuid.UUID):\n return {'S': str(value)}\n elif isinstance(value, list):\n return {'L': [_marshall_value(v) for v in value]}\n elif isinstance(value, set):\n if PYTHON3 and all([isinstance(v, bytes) for v in value]):\n return {'BS': _encode_binary_set(value)}\n elif PYTHON3 and all([isinstance(v, str) for v in value]):\n return {'SS': sorted(list(value))}\n elif all([isinstance(v, (int, float)) for v in value]):\n return {'NS': sorted([str(v) for v in value])}\n elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \\\n all([is_binary(v) for v in value]):\n return {'BS': _encode_binary_set(value)}\n elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \\\n all([is_binary(v) is False for v in value]):\n return {'SS': sorted(list(value))}\n else:\n raise ValueError('Can not mix types in a set')\n elif value is None:\n return {'NULL': True}\n raise ValueError('Unsupported type: %s' % type(value))\n"
] |
"""
Utilities for working with DynamoDB.
- :func:`.marshall`
- :func:`.unmarshal`
This module contains some helpers that make working with the
Amazon DynamoDB API a little less painful. Data is encoded as
`AttributeValue`_ structures in the JSON payloads and this module
defines functions that will handle the transcoding for you for
the vast majority of types that we use.
.. _AttributeValue: http://docs.aws.amazon.com/amazondynamodb/latest/
APIReference/API_AttributeValue.html
"""
import base64
import datetime
import uuid
import sys
PYTHON3 = True if sys.version_info > (3, 0, 0) else False
TEXT_CHARS = bytearray({7, 8, 9, 10, 12, 13, 27} |
set(range(0x20, 0x100)) - {0x7f})
if PYTHON3: # pragma: nocover
unicode = str
def is_binary(value):
"""
Check to see if a string contains binary data in Python2
:param str|bytes value: The value to check
:rtype: bool
"""
return bool(value.translate(None, TEXT_CHARS))
def unmarshall(values):
"""
Transform a response payload from DynamoDB to a native dict
:param dict values: The response payload from DynamoDB
:rtype: dict
:raises ValueError: if an unsupported type code is encountered
"""
unmarshalled = {}
for key in values:
unmarshalled[key] = _unmarshall_dict(values[key])
return unmarshalled
def _encode_binary_set(value):
"""Base64 encode binary values in list of values.
:param set value: The list of binary values
:rtype: list
"""
return sorted([base64.b64encode(v).decode('ascii') for v in value])
def _marshall_value(value):
"""
Recursively transform `value` into an AttributeValue `dict`
:param mixed value: The value to encode
:rtype: dict
:raises ValueError: for unsupported types
Return the value as dict indicating the data type and transform or
recursively process the value if required.
"""
if PYTHON3 and isinstance(value, bytes):
return {'B': base64.b64encode(value).decode('ascii')}
elif PYTHON3 and isinstance(value, str):
return {'S': value}
elif not PYTHON3 and isinstance(value, str):
if is_binary(value):
return {'B': base64.b64encode(value).decode('ascii')}
return {'S': value}
elif not PYTHON3 and isinstance(value, unicode):
return {'S': value.encode('utf-8')}
elif isinstance(value, dict):
return {'M': marshall(value)}
elif isinstance(value, bool):
return {'BOOL': value}
elif isinstance(value, (int, float)):
return {'N': str(value)}
elif isinstance(value, datetime.datetime):
return {'S': value.isoformat()}
elif isinstance(value, uuid.UUID):
return {'S': str(value)}
elif isinstance(value, list):
return {'L': [_marshall_value(v) for v in value]}
elif isinstance(value, set):
if PYTHON3 and all([isinstance(v, bytes) for v in value]):
return {'BS': _encode_binary_set(value)}
elif PYTHON3 and all([isinstance(v, str) for v in value]):
return {'SS': sorted(list(value))}
elif all([isinstance(v, (int, float)) for v in value]):
return {'NS': sorted([str(v) for v in value])}
elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
all([is_binary(v) for v in value]):
return {'BS': _encode_binary_set(value)}
elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
all([is_binary(v) is False for v in value]):
return {'SS': sorted(list(value))}
else:
raise ValueError('Can not mix types in a set')
elif value is None:
return {'NULL': True}
raise ValueError('Unsupported type: %s' % type(value))
def _to_number(value):
"""
Convert the string containing a number to a number
:param str value: The value to convert
:rtype: float|int
"""
return float(value) if '.' in value else int(value)
def _unmarshall_dict(value):
"""Unmarshall a single dict value from a row that was returned from
DynamoDB, returning the value as a normal Python dict.
:param dict value: The value to unmarshall
:rtype: mixed
:raises ValueError: if an unsupported type code is encountered
"""
key = list(value.keys()).pop()
if key == 'B':
return base64.b64decode(value[key].encode('ascii'))
elif key == 'BS':
return set([base64.b64decode(v.encode('ascii'))
for v in value[key]])
elif key == 'BOOL':
return value[key]
elif key == 'L':
return [_unmarshall_dict(v) for v in value[key]]
elif key == 'M':
return unmarshall(value[key])
elif key == 'NULL':
return None
elif key == 'N':
return _to_number(value[key])
elif key == 'NS':
return set([_to_number(v) for v in value[key]])
elif key == 'S':
return value[key]
elif key == 'SS':
return set([v for v in value[key]])
raise ValueError('Unsupported value type: %s' % key)
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/utils.py
|
unmarshall
|
python
|
def unmarshall(values):
unmarshalled = {}
for key in values:
unmarshalled[key] = _unmarshall_dict(values[key])
return unmarshalled
|
Transform a response payload from DynamoDB to a native dict
:param dict values: The response payload from DynamoDB
:rtype: dict
:raises ValueError: if an unsupported type code is encountered
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/utils.py#L58-L70
|
[
"def _unmarshall_dict(value):\n \"\"\"Unmarshall a single dict value from a row that was returned from\n DynamoDB, returning the value as a normal Python dict.\n\n :param dict value: The value to unmarshall\n :rtype: mixed\n :raises ValueError: if an unsupported type code is encountered\n\n \"\"\"\n key = list(value.keys()).pop()\n if key == 'B':\n return base64.b64decode(value[key].encode('ascii'))\n elif key == 'BS':\n return set([base64.b64decode(v.encode('ascii'))\n for v in value[key]])\n elif key == 'BOOL':\n return value[key]\n elif key == 'L':\n return [_unmarshall_dict(v) for v in value[key]]\n elif key == 'M':\n return unmarshall(value[key])\n elif key == 'NULL':\n return None\n elif key == 'N':\n return _to_number(value[key])\n elif key == 'NS':\n return set([_to_number(v) for v in value[key]])\n elif key == 'S':\n return value[key]\n elif key == 'SS':\n return set([v for v in value[key]])\n raise ValueError('Unsupported value type: %s' % key)\n"
] |
"""
Utilities for working with DynamoDB.
- :func:`.marshall`
- :func:`.unmarshal`
This module contains some helpers that make working with the
Amazon DynamoDB API a little less painful. Data is encoded as
`AttributeValue`_ structures in the JSON payloads and this module
defines functions that will handle the transcoding for you for
the vast majority of types that we use.
.. _AttributeValue: http://docs.aws.amazon.com/amazondynamodb/latest/
APIReference/API_AttributeValue.html
"""
import base64
import datetime
import uuid
import sys
PYTHON3 = True if sys.version_info > (3, 0, 0) else False
TEXT_CHARS = bytearray({7, 8, 9, 10, 12, 13, 27} |
set(range(0x20, 0x100)) - {0x7f})
if PYTHON3: # pragma: nocover
unicode = str
def is_binary(value):
"""
Check to see if a string contains binary data in Python2
:param str|bytes value: The value to check
:rtype: bool
"""
return bool(value.translate(None, TEXT_CHARS))
def marshall(values):
"""
Marshall a `dict` into something DynamoDB likes.
:param dict values: The values to marshall
:rtype: dict
:raises ValueError: if an unsupported type is encountered
Return the values in a nested dict structure that is required for
writing the values to DynamoDB.
"""
serialized = {}
for key in values:
serialized[key] = _marshall_value(values[key])
return serialized
def _encode_binary_set(value):
"""Base64 encode binary values in list of values.
:param set value: The list of binary values
:rtype: list
"""
return sorted([base64.b64encode(v).decode('ascii') for v in value])
def _marshall_value(value):
"""
Recursively transform `value` into an AttributeValue `dict`
:param mixed value: The value to encode
:rtype: dict
:raises ValueError: for unsupported types
Return the value as dict indicating the data type and transform or
recursively process the value if required.
"""
if PYTHON3 and isinstance(value, bytes):
return {'B': base64.b64encode(value).decode('ascii')}
elif PYTHON3 and isinstance(value, str):
return {'S': value}
elif not PYTHON3 and isinstance(value, str):
if is_binary(value):
return {'B': base64.b64encode(value).decode('ascii')}
return {'S': value}
elif not PYTHON3 and isinstance(value, unicode):
return {'S': value.encode('utf-8')}
elif isinstance(value, dict):
return {'M': marshall(value)}
elif isinstance(value, bool):
return {'BOOL': value}
elif isinstance(value, (int, float)):
return {'N': str(value)}
elif isinstance(value, datetime.datetime):
return {'S': value.isoformat()}
elif isinstance(value, uuid.UUID):
return {'S': str(value)}
elif isinstance(value, list):
return {'L': [_marshall_value(v) for v in value]}
elif isinstance(value, set):
if PYTHON3 and all([isinstance(v, bytes) for v in value]):
return {'BS': _encode_binary_set(value)}
elif PYTHON3 and all([isinstance(v, str) for v in value]):
return {'SS': sorted(list(value))}
elif all([isinstance(v, (int, float)) for v in value]):
return {'NS': sorted([str(v) for v in value])}
elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
all([is_binary(v) for v in value]):
return {'BS': _encode_binary_set(value)}
elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
all([is_binary(v) is False for v in value]):
return {'SS': sorted(list(value))}
else:
raise ValueError('Can not mix types in a set')
elif value is None:
return {'NULL': True}
raise ValueError('Unsupported type: %s' % type(value))
def _to_number(value):
"""
Convert the string containing a number to a number
:param str value: The value to convert
:rtype: float|int
"""
return float(value) if '.' in value else int(value)
def _unmarshall_dict(value):
"""Unmarshall a single dict value from a row that was returned from
DynamoDB, returning the value as a normal Python dict.
:param dict value: The value to unmarshall
:rtype: mixed
:raises ValueError: if an unsupported type code is encountered
"""
key = list(value.keys()).pop()
if key == 'B':
return base64.b64decode(value[key].encode('ascii'))
elif key == 'BS':
return set([base64.b64decode(v.encode('ascii'))
for v in value[key]])
elif key == 'BOOL':
return value[key]
elif key == 'L':
return [_unmarshall_dict(v) for v in value[key]]
elif key == 'M':
return unmarshall(value[key])
elif key == 'NULL':
return None
elif key == 'N':
return _to_number(value[key])
elif key == 'NS':
return set([_to_number(v) for v in value[key]])
elif key == 'S':
return value[key]
elif key == 'SS':
return set([v for v in value[key]])
raise ValueError('Unsupported value type: %s' % key)
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/utils.py
|
_marshall_value
|
python
|
def _marshall_value(value):
if PYTHON3 and isinstance(value, bytes):
return {'B': base64.b64encode(value).decode('ascii')}
elif PYTHON3 and isinstance(value, str):
return {'S': value}
elif not PYTHON3 and isinstance(value, str):
if is_binary(value):
return {'B': base64.b64encode(value).decode('ascii')}
return {'S': value}
elif not PYTHON3 and isinstance(value, unicode):
return {'S': value.encode('utf-8')}
elif isinstance(value, dict):
return {'M': marshall(value)}
elif isinstance(value, bool):
return {'BOOL': value}
elif isinstance(value, (int, float)):
return {'N': str(value)}
elif isinstance(value, datetime.datetime):
return {'S': value.isoformat()}
elif isinstance(value, uuid.UUID):
return {'S': str(value)}
elif isinstance(value, list):
return {'L': [_marshall_value(v) for v in value]}
elif isinstance(value, set):
if PYTHON3 and all([isinstance(v, bytes) for v in value]):
return {'BS': _encode_binary_set(value)}
elif PYTHON3 and all([isinstance(v, str) for v in value]):
return {'SS': sorted(list(value))}
elif all([isinstance(v, (int, float)) for v in value]):
return {'NS': sorted([str(v) for v in value])}
elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
all([is_binary(v) for v in value]):
return {'BS': _encode_binary_set(value)}
elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
all([is_binary(v) is False for v in value]):
return {'SS': sorted(list(value))}
else:
raise ValueError('Can not mix types in a set')
elif value is None:
return {'NULL': True}
raise ValueError('Unsupported type: %s' % type(value))
|
Recursively transform `value` into an AttributeValue `dict`
:param mixed value: The value to encode
:rtype: dict
:raises ValueError: for unsupported types
Return the value as dict indicating the data type and transform or
recursively process the value if required.
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/utils.py#L83-L134
|
[
"def marshall(values):\n \"\"\"\n Marshall a `dict` into something DynamoDB likes.\n\n :param dict values: The values to marshall\n :rtype: dict\n :raises ValueError: if an unsupported type is encountered\n\n Return the values in a nested dict structure that is required for\n writing the values to DynamoDB.\n\n \"\"\"\n serialized = {}\n for key in values:\n serialized[key] = _marshall_value(values[key])\n return serialized\n"
] |
"""
Utilities for working with DynamoDB.
- :func:`.marshall`
- :func:`.unmarshal`
This module contains some helpers that make working with the
Amazon DynamoDB API a little less painful. Data is encoded as
`AttributeValue`_ structures in the JSON payloads and this module
defines functions that will handle the transcoding for you for
the vast majority of types that we use.
.. _AttributeValue: http://docs.aws.amazon.com/amazondynamodb/latest/
APIReference/API_AttributeValue.html
"""
import base64
import datetime
import uuid
import sys
PYTHON3 = True if sys.version_info > (3, 0, 0) else False
TEXT_CHARS = bytearray({7, 8, 9, 10, 12, 13, 27} |
set(range(0x20, 0x100)) - {0x7f})
if PYTHON3: # pragma: nocover
unicode = str
def is_binary(value):
"""
Check to see if a string contains binary data in Python2
:param str|bytes value: The value to check
:rtype: bool
"""
return bool(value.translate(None, TEXT_CHARS))
def marshall(values):
"""
Marshall a `dict` into something DynamoDB likes.
:param dict values: The values to marshall
:rtype: dict
:raises ValueError: if an unsupported type is encountered
Return the values in a nested dict structure that is required for
writing the values to DynamoDB.
"""
serialized = {}
for key in values:
serialized[key] = _marshall_value(values[key])
return serialized
def unmarshall(values):
"""
Transform a response payload from DynamoDB to a native dict
:param dict values: The response payload from DynamoDB
:rtype: dict
:raises ValueError: if an unsupported type code is encountered
"""
unmarshalled = {}
for key in values:
unmarshalled[key] = _unmarshall_dict(values[key])
return unmarshalled
def _encode_binary_set(value):
"""Base64 encode binary values in list of values.
:param set value: The list of binary values
:rtype: list
"""
return sorted([base64.b64encode(v).decode('ascii') for v in value])
def _to_number(value):
"""
Convert the string containing a number to a number
:param str value: The value to convert
:rtype: float|int
"""
return float(value) if '.' in value else int(value)
def _unmarshall_dict(value):
"""Unmarshall a single dict value from a row that was returned from
DynamoDB, returning the value as a normal Python dict.
:param dict value: The value to unmarshall
:rtype: mixed
:raises ValueError: if an unsupported type code is encountered
"""
key = list(value.keys()).pop()
if key == 'B':
return base64.b64decode(value[key].encode('ascii'))
elif key == 'BS':
return set([base64.b64decode(v.encode('ascii'))
for v in value[key]])
elif key == 'BOOL':
return value[key]
elif key == 'L':
return [_unmarshall_dict(v) for v in value[key]]
elif key == 'M':
return unmarshall(value[key])
elif key == 'NULL':
return None
elif key == 'N':
return _to_number(value[key])
elif key == 'NS':
return set([_to_number(v) for v in value[key]])
elif key == 'S':
return value[key]
elif key == 'SS':
return set([v for v in value[key]])
raise ValueError('Unsupported value type: %s' % key)
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/utils.py
|
_unmarshall_dict
|
python
|
def _unmarshall_dict(value):
key = list(value.keys()).pop()
if key == 'B':
return base64.b64decode(value[key].encode('ascii'))
elif key == 'BS':
return set([base64.b64decode(v.encode('ascii'))
for v in value[key]])
elif key == 'BOOL':
return value[key]
elif key == 'L':
return [_unmarshall_dict(v) for v in value[key]]
elif key == 'M':
return unmarshall(value[key])
elif key == 'NULL':
return None
elif key == 'N':
return _to_number(value[key])
elif key == 'NS':
return set([_to_number(v) for v in value[key]])
elif key == 'S':
return value[key]
elif key == 'SS':
return set([v for v in value[key]])
raise ValueError('Unsupported value type: %s' % key)
|
Unmarshall a single dict value from a row that was returned from
DynamoDB, returning the value as a normal Python dict.
:param dict value: The value to unmarshall
:rtype: mixed
:raises ValueError: if an unsupported type code is encountered
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/utils.py#L148-L179
|
[
"def unmarshall(values):\n \"\"\"\n Transform a response payload from DynamoDB to a native dict\n\n :param dict values: The response payload from DynamoDB\n :rtype: dict\n :raises ValueError: if an unsupported type code is encountered\n\n \"\"\"\n unmarshalled = {}\n for key in values:\n unmarshalled[key] = _unmarshall_dict(values[key])\n return unmarshalled\n",
"def _to_number(value):\n \"\"\"\n Convert the string containing a number to a number\n\n :param str value: The value to convert\n :rtype: float|int\n\n \"\"\"\n return float(value) if '.' in value else int(value)\n"
] |
"""
Utilities for working with DynamoDB.
- :func:`.marshall`
- :func:`.unmarshal`
This module contains some helpers that make working with the
Amazon DynamoDB API a little less painful. Data is encoded as
`AttributeValue`_ structures in the JSON payloads and this module
defines functions that will handle the transcoding for you for
the vast majority of types that we use.
.. _AttributeValue: http://docs.aws.amazon.com/amazondynamodb/latest/
APIReference/API_AttributeValue.html
"""
import base64
import datetime
import uuid
import sys
PYTHON3 = True if sys.version_info > (3, 0, 0) else False
TEXT_CHARS = bytearray({7, 8, 9, 10, 12, 13, 27} |
set(range(0x20, 0x100)) - {0x7f})
if PYTHON3: # pragma: nocover
unicode = str
def is_binary(value):
"""
Check to see if a string contains binary data in Python2
:param str|bytes value: The value to check
:rtype: bool
"""
return bool(value.translate(None, TEXT_CHARS))
def marshall(values):
"""
Marshall a `dict` into something DynamoDB likes.
:param dict values: The values to marshall
:rtype: dict
:raises ValueError: if an unsupported type is encountered
Return the values in a nested dict structure that is required for
writing the values to DynamoDB.
"""
serialized = {}
for key in values:
serialized[key] = _marshall_value(values[key])
return serialized
def unmarshall(values):
"""
Transform a response payload from DynamoDB to a native dict
:param dict values: The response payload from DynamoDB
:rtype: dict
:raises ValueError: if an unsupported type code is encountered
"""
unmarshalled = {}
for key in values:
unmarshalled[key] = _unmarshall_dict(values[key])
return unmarshalled
def _encode_binary_set(value):
"""Base64 encode binary values in list of values.
:param set value: The list of binary values
:rtype: list
"""
return sorted([base64.b64encode(v).decode('ascii') for v in value])
def _marshall_value(value):
"""
Recursively transform `value` into an AttributeValue `dict`
:param mixed value: The value to encode
:rtype: dict
:raises ValueError: for unsupported types
Return the value as dict indicating the data type and transform or
recursively process the value if required.
"""
if PYTHON3 and isinstance(value, bytes):
return {'B': base64.b64encode(value).decode('ascii')}
elif PYTHON3 and isinstance(value, str):
return {'S': value}
elif not PYTHON3 and isinstance(value, str):
if is_binary(value):
return {'B': base64.b64encode(value).decode('ascii')}
return {'S': value}
elif not PYTHON3 and isinstance(value, unicode):
return {'S': value.encode('utf-8')}
elif isinstance(value, dict):
return {'M': marshall(value)}
elif isinstance(value, bool):
return {'BOOL': value}
elif isinstance(value, (int, float)):
return {'N': str(value)}
elif isinstance(value, datetime.datetime):
return {'S': value.isoformat()}
elif isinstance(value, uuid.UUID):
return {'S': str(value)}
elif isinstance(value, list):
return {'L': [_marshall_value(v) for v in value]}
elif isinstance(value, set):
if PYTHON3 and all([isinstance(v, bytes) for v in value]):
return {'BS': _encode_binary_set(value)}
elif PYTHON3 and all([isinstance(v, str) for v in value]):
return {'SS': sorted(list(value))}
elif all([isinstance(v, (int, float)) for v in value]):
return {'NS': sorted([str(v) for v in value])}
elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
all([is_binary(v) for v in value]):
return {'BS': _encode_binary_set(value)}
elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
all([is_binary(v) is False for v in value]):
return {'SS': sorted(list(value))}
else:
raise ValueError('Can not mix types in a set')
elif value is None:
return {'NULL': True}
raise ValueError('Unsupported type: %s' % type(value))
def _to_number(value):
"""
Convert the string containing a number to a number
:param str value: The value to convert
:rtype: float|int
"""
return float(value) if '.' in value else int(value)
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/client.py
|
_unwrap_result
|
python
|
def _unwrap_result(action, result):
if not result:
return
elif action in {'DeleteItem', 'PutItem', 'UpdateItem'}:
return _unwrap_delete_put_update_item(result)
elif action == 'GetItem':
return _unwrap_get_item(result)
elif action == 'Query' or action == 'Scan':
return _unwrap_query_scan(result)
elif action == 'CreateTable':
return _unwrap_create_table(result)
elif action == 'DescribeTable':
return _unwrap_describe_table(result)
return result
|
Unwrap a request response and return only the response data.
:param str action: The action name
:param result: The result of the action
:type: result: list or dict
:rtype: dict | None
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/client.py#L943-L964
| null |
"""
DynamoDB Client
===============
"""
import collections
import json
import logging
import os
import select as _select
import socket
import ssl
import time
from tornado import concurrent, gen, httpclient, ioloop
import tornado_aws
from tornado_aws import exceptions as aws_exceptions
from sprockets_dynamodb import exceptions, utils
LOGGER = logging.getLogger(__name__)
Measurement = collections.namedtuple(
'Measurement',
['timestamp', 'action', 'table', 'attempt', 'duration', 'error'])
class Client(object):
"""
Asynchronous DynamoDB Client
:keyword str region: AWS region to send requests to
:keyword str access_key: AWS access key. If unspecified, this
defaults to the :envvar:`AWS_ACCESS_KEY_ID` environment
variable and will fall back to using the AWS CLI credentials
file. See :class:`tornado_aws.client.AsyncAWSClient` for
more details.
:keyword str secret_key: AWS secret used to secure API calls.
If unspecified, this defaults to the :envvar:`AWS_SECRET_ACCESS_KEY`
environment variable and will fall back to using the AWS CLI
credentials as described in :class:`tornado_aws.client.AsyncAWSClient`.
:keyword str profile: optional profile to use in AWS API calls.
If unspecified, this defaults to the :envvar:`AWS_DEFAULT_PROFILE`
environment variable or ``default`` if unset.
:keyword str endpoint: DynamoDB endpoint to contact. If unspecified,
the default is determined by the region.
:keyword int max_clients: optional maximum number of HTTP requests
that may be performed in parallel.
:keyword int max_retries: Maximum number of times to retry a request when
if fails under certain conditions. Can also be set with the
:envvar:`DYNAMODB_MAX_RETRIES` environment variable.
:keyword method instrumentation_callback: A method that is invoked with a
list of measurements that were collected during the execution of an
individual action.
:keyword method on_error_callback: A method that is invoked when there is
a request exception that can not automatically be retried or the
maximum number of retries has been exceeded for a request.
Any of the methods invoked in the client can raise the following
exceptions:
- :exc:`sprockets_dynamodb.exceptions.DynamoDBException`
- :exc:`sprockets_dynamodb.exceptions.ConfigNotFound`
- :exc:`sprockets_dynamodb.exceptions.NoCredentialsError`
- :exc:`sprockets_dynamodb.exceptions.NoProfileError`
- :exc:`sprockets_dynamodb.exceptions.TimeoutException`
- :exc:`sprockets_dynamodb.exceptions.RequestException`
- :exc:`sprockets_dynamodb.exceptions.InternalFailure`
- :exc:`sprockets_dynamodb.exceptions.LimitExceeded`
- :exc:`sprockets_dynamodb.exceptions.MissingParameter`
- :exc:`sprockets_dynamodb.exceptions.OptInRequired`
- :exc:`sprockets_dynamodb.exceptions.ResourceInUse`
- :exc:`sprockets_dynamodb.exceptions.RequestExpired`
- :exc:`sprockets_dynamodb.exceptions.ServiceUnavailable`
- :exc:`sprockets_dynamodb.exceptions.ValidationException`
Create an instance of this class to interact with a DynamoDB
server. A :class:`tornado_aws.client.AsyncAWSClient` instance
implements the AWS API wrapping and this class provides the
DynamoDB specifics.
"""
DEFAULT_MAX_RETRIES = 3
def __init__(self, **kwargs):
self.logger = LOGGER.getChild(self.__class__.__name__)
if os.environ.get('DYNAMODB_ENDPOINT', None):
kwargs.setdefault('endpoint', os.environ['DYNAMODB_ENDPOINT'])
self._client = tornado_aws.AsyncAWSClient('dynamodb', **kwargs)
self._ioloop = kwargs.get('io_loop', ioloop.IOLoop.current())
self._max_retries = kwargs.get(
'max_retries', os.environ.get(
'DYNAMODB_MAX_RETRIES', self.DEFAULT_MAX_RETRIES))
self._instrumentation_callback = kwargs.get('instrumentation_callback')
self._on_error = kwargs.get('on_error_callback')
def create_table(self, table_definition):
"""
Invoke the ``CreateTable`` function.
:param dict table_definition: description of the table to
create according to `CreateTable`_
:rtype: tornado.concurrent.Future
.. _CreateTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_CreateTable.html
"""
return self.execute('CreateTable', table_definition)
def update_table(self, table_definition):
"""
Modifies the provisioned throughput settings, global secondary
indexes, or DynamoDB Streams settings for a given table.
You can only perform one of the following operations at once:
- Modify the provisioned throughput settings of the table.
- Enable or disable Streams on the table.
- Remove a global secondary index from the table.
- Create a new global secondary index on the table. Once the index
begins back-filling, you can use *UpdateTable* to perform other
operations.
*UpdateTable* is an asynchronous operation; while it is executing, the
table status changes from ``ACTIVE`` to ``UPDATING``. While it is
``UPDATING``, you cannot issue another *UpdateTable* request. When the
table returns to the ``ACTIVE`` state, the *UpdateTable* operation is
complete.
:param dict table_definition: description of the table to
update according to `UpdateTable`_
:rtype: tornado.concurrent.Future
.. _UpdateTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_UpdateTable.html
"""
raise NotImplementedError
def delete_table(self, table_name):
"""
Invoke the `DeleteTable`_ function. The DeleteTable operation deletes a
table and all of its items. After a DeleteTable request, the specified
table is in the DELETING state until DynamoDB completes the deletion.
If the table is in the ACTIVE state, you can delete it. If a table is
in CREATING or UPDATING states, then a
:py:exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
exception is raised. If the specified table does not exist, a
:exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
exception is raised. If table is already in the DELETING state, no
error is returned.
:param str table_name: name of the table to describe.
:rtype: tornado.concurrent.Future
.. _DeleteTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_DeleteTable.html
"""
return self.execute('DeleteTable', {'TableName': table_name})
def describe_table(self, table_name):
"""
Invoke the `DescribeTable`_ function.
:param str table_name: name of the table to describe.
:rtype: tornado.concurrent.Future
.. _DescribeTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_DescribeTable.html
"""
return self.execute('DescribeTable', {'TableName': table_name})
def list_tables(self, exclusive_start_table_name=None, limit=None):
"""
Invoke the `ListTables`_ function.
Returns an array of table names associated with the current account
and endpoint. The output from *ListTables* is paginated, with each page
returning a maximum of ``100`` table names.
:param str exclusive_start_table_name: The first table name that this
operation will evaluate. Use the value that was returned for
``LastEvaluatedTableName`` in a previous operation, so that you can
obtain the next page of results.
:param int limit: A maximum number of table names to return. If this
parameter is not specified, the limit is ``100``.
.. _ListTables: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_ListTables.html
"""
payload = {}
if exclusive_start_table_name:
payload['ExclusiveStartTableName'] = exclusive_start_table_name
if limit:
payload['Limit'] = limit
return self.execute('ListTables', payload)
def put_item(self, table_name, item,
             condition_expression=None,
             expression_attribute_names=None,
             expression_attribute_values=None,
             return_consumed_capacity=None,
             return_item_collection_metrics=None,
             return_values=None):
    """Invoke the `PutItem`_ function, creating a new item or completely
    replacing an existing item with the same primary key.

    :param str table_name: the table to put the item to.
    :param dict item: attribute name/value pairs; the full primary key is
        required. Marshalled automatically, pass a native :class:`dict`.
    :param str condition_expression: a condition that must be satisfied
        for the put to succeed.
    :param dict expression_attribute_names: substitution tokens for
        attribute names in an expression.
    :param dict expression_attribute_values: values substitutable in an
        expression.
    :param str return_consumed_capacity: level of consumed-capacity
        detail in the response; one of ``INDEXES``, ``TOTAL``, ``NONE``.
    :param str return_item_collection_metrics: ``SIZE`` or ``NONE``;
        whether item collection metrics are returned.
    :param str return_values: item attributes as they appeared before
        the update (e.g. ``ALL_OLD``).
    :rtype: tornado.concurrent.Future
    :raises ValueError: if one of the ``return_*`` options is invalid.

    .. _PutItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_PutItem.html

    """
    payload = {'TableName': table_name, 'Item': utils.marshall(item)}
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        # NOTE(review): unlike update_item/delete_item this value is NOT
        # run through utils.marshall(); existing callers presumably pass
        # the wire format already -- confirm before changing.
        payload['ExpressionAttributeValues'] = expression_attribute_values
    if return_consumed_capacity:
        # Validate like the other item operations do (previously any
        # truthy value was forwarded unchecked).
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        # Bug fix: honor the caller-supplied value instead of
        # hard-coding 'SIZE'; valid values are 'SIZE' and 'NONE'.
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('PutItem', payload)
def get_item(self, table_name, key_dict,
             consistent_read=False,
             expression_attribute_names=None,
             projection_expression=None,
             return_consumed_capacity=None):
    """Fetch a single item via the `GetItem`_ function.

    :param str table_name: table to retrieve the item from.
    :param dict key_dict: primary key of the item; marshalled
        automatically, pass a native :class:`dict`.
    :param bool consistent_read: use strongly consistent reads when
        :data:`True`, eventually consistent otherwise.
    :param dict expression_attribute_names: substitution tokens for
        attribute names in an expression.
    :param str projection_expression: comma-separated attributes to
        retrieve; all attributes are returned when omitted.
    :param str return_consumed_capacity: ``INDEXES``, ``TOTAL`` or
        ``NONE``; level of consumed-capacity detail in the response.
    :rtype: tornado.concurrent.Future
    :raises ValueError: if ``return_consumed_capacity`` is invalid.

    .. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_GetItem.html

    """
    payload = {'TableName': table_name,
               'Key': utils.marshall(key_dict),
               'ConsistentRead': consistent_read}
    optional = (
        ('ExpressionAttributeNames', expression_attribute_names),
        ('ProjectionExpression', projection_expression),
    )
    for name, value in optional:
        if value:
            payload[name] = value
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('GetItem', payload)
def update_item(self, table_name, key_dict,
                condition_expression=None,
                update_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=None):
    """Invoke the `UpdateItem`_ function.

    Edits an existing item's attributes, or adds a new item to the table
    if it does not already exist; supports conditional updates.

    :param str table_name: name of the table containing the item.
    :param dict key_dict: the full primary key of the item to update;
        marshalled automatically, pass a native :class:`dict`.
    :param str condition_expression: a condition that must be satisfied
        for the update to succeed.
    :param str update_expression: defines the attributes to update, the
        action to perform, and the new value(s).
    :param dict expression_attribute_names: substitution tokens for
        attribute names in an expression.
    :param dict expression_attribute_values: values substitutable in an
        expression (marshalled automatically).
    :param str return_consumed_capacity: ``INDEXES``, ``TOTAL`` or
        ``NONE``; level of consumed-capacity detail in the response.
    :param str return_item_collection_metrics: ``SIZE`` or ``NONE``;
        whether item collection metrics are returned.
    :param str return_values: item attributes as they appeared before or
        after the update (e.g. ``ALL_NEW``, ``UPDATED_OLD``).
    :rtype: tornado.concurrent.Future
    :raises ValueError: if one of the ``return_*`` options is invalid.

    .. _UpdateItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateItem.html

    """
    payload = {'TableName': table_name, 'Key': utils.marshall(key_dict)}
    # Bug fix: only include UpdateExpression when one was supplied.  The
    # previous implementation set it unconditionally, sending ``null``
    # to DynamoDB whenever the caller omitted ``update_expression``.
    if update_expression:
        payload['UpdateExpression'] = update_expression
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('UpdateItem', payload)
def delete_item(self, table_name, key_dict,
                condition_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=False):
    """Invoke the `DeleteItem`_ function, removing a single item by its
    primary key, optionally only when a condition holds.

    :param str table_name: table to delete the item from.
    :param dict key_dict: the full primary key of the item to delete;
        marshalled automatically, pass a native :class:`dict`.
    :param str condition_expression: a condition that must be satisfied
        for the delete to succeed.
    :param dict expression_attribute_names: substitution tokens for
        attribute names in an expression.
    :param dict expression_attribute_values: values substitutable in an
        expression (marshalled automatically).
    :param str return_consumed_capacity: ``INDEXES``, ``TOTAL`` or
        ``NONE``; level of consumed-capacity detail in the response.
    :param str return_item_collection_metrics: ``SIZE`` or ``NONE``;
        whether item collection metrics are returned.
    :param str return_values: return the item attributes as they
        appeared before the delete (e.g. ``ALL_OLD``).
    :raises ValueError: if one of the ``return_*`` options is invalid.

    .. _DeleteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteItem.html

    """
    payload = {'TableName': table_name, 'Key': utils.marshall(key_dict)}
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = utils.marshall(
            expression_attribute_values)
    # Each return_* option is validated before being forwarded.
    validated = (
        ('ReturnConsumedCapacity', return_consumed_capacity,
         _validate_return_consumed_capacity),
        ('ReturnItemCollectionMetrics', return_item_collection_metrics,
         _validate_return_item_collection_metrics),
        ('ReturnValues', return_values, _validate_return_values),
    )
    for key, value, check in validated:
        if value:
            check(value)
            payload[key] = value
    return self.execute('DeleteItem', payload)
def batch_get_item(self):
    """Placeholder for the `BatchGetItem`_ function; not implemented.

    :raises NotImplementedError: always.

    .. _BatchGetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchGetItem.html

    """
    raise NotImplementedError
def batch_write_item(self):
    """Placeholder for the `BatchWriteItem`_ function; not implemented.

    :raises NotImplementedError: always.

    .. _BatchWriteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchWriteItem.html

    """
    raise NotImplementedError
def query(self, table_name,
          index_name=None,
          consistent_read=None,
          key_condition_expression=None,
          filter_expression=None,
          expression_attribute_names=None,
          expression_attribute_values=None,
          projection_expression=None,
          select=None,
          exclusive_start_key=None,
          limit=None,
          scan_index_forward=True,
          return_consumed_capacity=None):
    """Perform a `Query`_ operation against a table or secondary index.

    :param str table_name: the table containing the requested items.
    :param str index_name: name of a local or global secondary index to
        query instead of the base table.
    :param bool consistent_read: use strongly consistent reads when
        ``True``; not supported on global secondary indexes.
    :param str key_condition_expression: the key condition for items to
        retrieve; must include a partition-key equality test and may add
        a sort-key comparison.
    :param str filter_expression: conditions DynamoDB applies after the
        query but before returning data; does not reduce read capacity.
    :param dict expression_attribute_names: substitution tokens for
        attribute names in an expression.
    :param dict expression_attribute_values: values substitutable in an
        expression (marshalled automatically).
    :param str projection_expression: comma-separated attributes to
        retrieve.
    :param str select: ``ALL_ATTRIBUTES``, ``ALL_PROJECTED_ATTRIBUTES``,
        ``SPECIFIC_ATTRIBUTES`` or ``COUNT``.
    :param dict exclusive_start_key: ``LastEvaluatedKey`` from a prior
        response, to continue pagination (marshalled automatically).
    :param int limit: maximum number of items to evaluate.
    :param bool scan_index_forward: traverse the index in ascending
        order when ``True`` (default), descending when ``False``.
    :param str return_consumed_capacity: ``INDEXES``, ``TOTAL`` or
        ``NONE``; level of consumed-capacity detail in the response.
    :rtype: dict
    :raises ValueError: for an invalid ``select`` or
        ``return_consumed_capacity`` value.

    .. _Query: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_Query.html

    """
    payload = {'TableName': table_name,
               'ScanIndexForward': scan_index_forward}
    # Pass-through options that need no marshalling or validation.
    simple = (('IndexName', index_name),
              ('KeyConditionExpression', key_condition_expression),
              ('FilterExpression', filter_expression),
              ('ExpressionAttributeNames', expression_attribute_names),
              ('ProjectionExpression', projection_expression),
              ('Limit', limit))
    for key, value in simple:
        if value:
            payload[key] = value
    # ``False`` is a meaningful value here, so test against None.
    if consistent_read is not None:
        payload['ConsistentRead'] = consistent_read
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if select:
        _validate_select(select)
        payload['Select'] = select
    if exclusive_start_key:
        payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Query', payload)
def scan(self,
         table_name,
         index_name=None,
         consistent_read=None,
         projection_expression=None,
         filter_expression=None,
         expression_attribute_names=None,
         expression_attribute_values=None,
         segment=None,
         total_segments=None,
         select=None,
         limit=None,
         exclusive_start_key=None,
         return_consumed_capacity=None):
    """The `Scan`_ operation returns one or more items and item
    attributes by accessing every item in a table or secondary index.

    If the scanned data exceeds the 1 MB limit, the scan stops and a
    ``LastEvaluatedKey`` is returned to continue in a later call. For
    faster performance on large tables, request a parallel scan with
    the ``segment`` and ``total_segments`` parameters. Scans use
    eventually consistent reads unless ``consistent_read`` is ``True``.

    :param str table_name: the table to scan.
    :param str index_name: name of a secondary index to scan instead.
    :param bool consistent_read: use strongly consistent reads when
        ``True``.
    :param str projection_expression: comma-separated attributes to
        retrieve.
    :param str filter_expression: conditions applied after the scan but
        before data is returned.
    :param dict expression_attribute_names: substitution tokens for
        attribute names in an expression.
    :param dict expression_attribute_values: values substitutable in an
        expression (marshalled automatically).
    :param int segment: zero-based segment of a parallel scan.
    :param int total_segments: total number of parallel-scan segments.
    :param str select: ``ALL_ATTRIBUTES``, ``ALL_PROJECTED_ATTRIBUTES``,
        ``SPECIFIC_ATTRIBUTES`` or ``COUNT``.
    :param int limit: maximum number of items to evaluate.
    :param dict exclusive_start_key: ``LastEvaluatedKey`` from a prior
        response (marshalled automatically).
    :param str return_consumed_capacity: ``INDEXES``, ``TOTAL`` or
        ``NONE``.
    :rtype: dict
    :raises ValueError: for an invalid ``select`` or
        ``return_consumed_capacity`` value.

    .. _Scan: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_Scan.html

    """
    payload = {'TableName': table_name}
    if index_name:
        payload['IndexName'] = index_name
    if consistent_read is not None:
        payload['ConsistentRead'] = consistent_read
    if filter_expression:
        payload['FilterExpression'] = filter_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if projection_expression:
        payload['ProjectionExpression'] = projection_expression
    # Bug fix: segments are zero-indexed, so ``segment=0`` is a valid
    # parallel-scan request; the old ``if segment:`` check dropped it.
    if segment is not None:
        payload['Segment'] = segment
    if total_segments:
        payload['TotalSegments'] = total_segments
    if select:
        _validate_select(select)
        payload['Select'] = select
    if exclusive_start_key:
        payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if limit:
        payload['Limit'] = limit
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Scan', payload)
@gen.coroutine
def execute(self, action, parameters):
    """Execute a DynamoDB *action* with the given *parameters*, retrying
    requests that failed with transient errors (throttling, service
    unavailable, internal server errors, request failures).

    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :rtype: tornado.concurrent.Future

    The returned future resolves to the result of the DynamoDB call,
    unwrapped via ``_unwrap_result`` to be easier to consume. On the
    final failed attempt the error is passed to ``_on_exception`` (which
    either raises it or hands it to the registered error callback).

    :raises: :exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
        and its subclasses; see the module's exception hierarchy.
    """
    # Bounded deque: holds at most one Measurement per attempt.
    measurements = collections.deque([], self._max_retries)
    for attempt in range(1, self._max_retries + 1):
        try:
            result = yield self._execute(
                action, parameters, attempt, measurements)
        except (exceptions.InternalServerError,
                exceptions.RequestException,
                exceptions.ThrottlingException,
                exceptions.ThroughputExceeded,
                exceptions.ServiceUnavailable) as error:
            # Retryable errors: report and give up on the last attempt,
            # otherwise back off exponentially and loop again.
            if attempt == self._max_retries:
                if self._instrumentation_callback:
                    self._instrumentation_callback(measurements)
                self._on_exception(error)
            duration = self._sleep_duration(attempt)
            self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
                                error, attempt, duration)
            yield gen.sleep(duration)
        except exceptions.DynamoDBException as error:
            # Any other DynamoDB error is non-retryable.
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self._on_exception(error)
        else:
            # Success: flush measurements and resolve the coroutine.
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self.logger.debug('%s result: %r', action, result)
            raise gen.Return(_unwrap_result(action, result))
def set_error_callback(self, callback):
    """Register *callback* to be invoked when an action execution hits
    an unrecoverable error; when set, errors are passed to it instead
    of being raised.

    :param method callback: the method to invoke

    """
    self.logger.debug('Setting error callback: %r', callback)
    self._on_error = callback
def set_instrumentation_callback(self, callback):
    """Register *callback* to receive the accumulated per-attempt
    measurements once a request has finished.

    :param method callback: the method to invoke

    """
    self.logger.debug('Setting instrumentation callback: %r', callback)
    self._instrumentation_callback = callback
def _execute(self, action, parameters, attempt, measurements):
    """Issue one HTTP request for a DynamoDB *action*.

    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :param int attempt: which attempt number this is
    :param list measurements: accumulator for request measurements
    :rtype: tornado.concurrent.Future
    """
    # Our own future, resolved by _on_response once the AWS client's
    # fetch future completes.
    future = concurrent.Future()
    start = time.time()

    def handle_response(request):
        """Invoked by the IOLoop when fetch has a response to process.

        :param tornado.concurrent.Future request: The request future
        """
        self._on_response(
            action, parameters.get('TableName', 'Unknown'), attempt,
            start, request, future, measurements)

    # POST the JSON payload to the DynamoDB endpoint; the target action
    # is selected via the x-amz-target header per the AWS JSON protocol.
    ioloop.IOLoop.current().add_future(self._client.fetch(
        'POST', '/',
        body=json.dumps(parameters).encode('utf-8'),
        headers={
            'x-amz-target': 'DynamoDB_20120810.{}'.format(action),
            'Content-Type': 'application/x-amz-json-1.0',
        }), handle_response)
    return future
def _on_exception(self, error):
    """Dispatch an unrecoverable *error*: hand it to the registered
    error callback when one is set, otherwise raise it.

    :param error: the exception that was raised
    :type error: sprockets_dynamodb.exceptions.DynamoDBException

    """
    if self._on_error:
        self._on_error(error)
    else:
        raise error
def _on_response(self, action, table, attempt, start, response, future,
                 measurements):
    """Translate a completed HTTP request into the action future's
    result or exception, and record a Measurement for the attempt.

    :param str action: the action that was taken
    :param str table: the table name the action was made against
    :param int attempt: the attempt number for the action
    :param float start: ``time.time()`` when the request was submitted
    :param tornado.concurrent.Future response: the HTTP request future
    :param tornado.concurrent.Future future: the action execution future
    :param list measurements: the measurement accumulator
    """
    self.logger.debug('%s on %s request #%i = %r',
                      action, table, attempt, response)
    now, exception = time.time(), None
    try:
        future.set_result(self._process_response(response))
    # Map AWS-layer errors onto this package's exception types so
    # callers only ever see sprockets_dynamodb.exceptions classes.
    except aws_exceptions.ConfigNotFound as error:
        exception = exceptions.ConfigNotFound(str(error))
    except aws_exceptions.ConfigParserError as error:
        exception = exceptions.ConfigParserError(str(error))
    except aws_exceptions.NoCredentialsError as error:
        exception = exceptions.NoCredentialsError(str(error))
    except aws_exceptions.NoProfileError as error:
        exception = exceptions.NoProfileError(str(error))
    except aws_exceptions.AWSError as error:
        exception = exceptions.DynamoDBException(error)
    # Network/socket-level failures become RequestException; the exact
    # tuple members (_select.error, ssl.socket_error) presumably cover
    # older Python/tornado socket error spellings -- verify before
    # pruning.
    except (ConnectionError, ConnectionResetError, OSError,
            aws_exceptions.RequestException, ssl.SSLError,
            _select.error, ssl.socket_error, socket.gaierror) as error:
        exception = exceptions.RequestException(str(error))
    except TimeoutError:
        exception = exceptions.TimeoutException()
    except httpclient.HTTPError as error:
        # Tornado uses code 599 for client-side timeouts.
        if error.code == 599:
            exception = exceptions.TimeoutException()
        else:
            exception = exceptions.RequestException(
                getattr(getattr(error, 'response', error),
                        'body', str(error.code)))
    except Exception as error:
        exception = error
    if exception:
        future.set_exception(exception)
    # Always record a Measurement; the last field is the exception class
    # name on failure, None on success.  max() guards against a clock
    # that moved backwards producing a negative duration.
    measurements.append(
        Measurement(now, action, table, attempt, max(now, start) - start,
                    exception.__class__.__name__
                    if exception else exception))
@staticmethod
def _process_response(response):
"""Process the raw AWS response, returning either the mapped exception
or deserialized response.
:param tornado.concurrent.Future response: The request future
:rtype: dict or list
:raises: sprockets_dynamodb.exceptions.DynamoDBException
"""
error = response.exception()
if error:
if isinstance(error, aws_exceptions.AWSError):
if error.args[1]['type'] in exceptions.MAP:
raise exceptions.MAP[error.args[1]['type']](
error.args[1]['message'])
raise error
http_response = response.result()
if not http_response or not http_response.body:
raise exceptions.DynamoDBException('empty response')
return json.loads(http_response.body.decode('utf-8'))
@staticmethod
def _sleep_duration(attempt):
"""Calculates how long to sleep between exceptions. Returns a value
in seconds.
:param int attempt: The attempt number
:rtype: float
"""
return (float(2 ** attempt) * 100) / 1000
def _unwrap_delete_put_update_item(result):
    """Normalize a raw DeleteItem/PutItem/UpdateItem response,
    unmarshalling attributes and item collection metrics."""
    response = {
        'Attributes': utils.unmarshall(result['Attributes'] if result else {})
    }
    if 'ConsumedCapacity' in result:
        response['ConsumedCapacity'] = result['ConsumedCapacity']
    if 'ItemCollectionMetrics' in result:
        metrics = result['ItemCollectionMetrics']
        response['ItemCollectionMetrics'] = {
            'ItemCollectionKey': utils.unmarshall(
                metrics.get('ItemCollectionKey', {})),
            'SizeEstimateRangeGB':
                metrics.get('SizeEstimateRangeGB', [None]).pop()
        }
    return response
def _unwrap_get_item(result):
    """Normalize a raw GetItem response, unmarshalling the item."""
    unwrapped = {
        'Item': utils.unmarshall(result['Item'] if result else {})
    }
    if 'ConsumedCapacity' in result:
        unwrapped['ConsumedCapacity'] = result['ConsumedCapacity']
    return unwrapped
def _unwrap_query_scan(result):
    """Normalize a raw Query/Scan response, unmarshalling each item and
    the pagination key."""
    unwrapped = {
        'Count': result.get('Count', 0),
        'Items': [utils.unmarshall(item) for item in result.get('Items', [])],
        'ScannedCount': result.get('ScannedCount', 0),
    }
    if 'LastEvaluatedKey' in result:
        unwrapped['LastEvaluatedKey'] = utils.unmarshall(
            result['LastEvaluatedKey'])
    if 'ConsumedCapacity' in result:
        unwrapped['ConsumedCapacity'] = result['ConsumedCapacity']
    return unwrapped
def _unwrap_create_table(result):
    """Return the ``TableDescription`` of a CreateTable response."""
    return result['TableDescription']
def _unwrap_describe_table(result):
    """Return the ``Table`` portion of a DescribeTable response."""
    return result['Table']
def _validate_return_consumed_capacity(value):
    """Raise :exc:`ValueError` unless *value* is a valid
    ``ReturnConsumedCapacity`` option."""
    if value not in ('INDEXES', 'TOTAL', 'NONE'):
        raise ValueError('Invalid return_consumed_capacity value')
def _validate_return_item_collection_metrics(value):
    """Raise :exc:`ValueError` unless *value* is a valid
    ``ReturnItemCollectionMetrics`` option."""
    if value not in ('NONE', 'SIZE'):
        raise ValueError('Invalid return_item_collection_metrics value')
def _validate_return_values(value):
if value not in ['NONE', 'ALL_NEW', 'ALL_OLD',
'UPDATED_NEW', 'UPDATED_OLD']:
raise ValueError('Invalid return_values value')
def _validate_select(value):
if value not in ['ALL_ATTRIBUTES', 'ALL_PROJECTED_ATTRIBUTES', 'COUNT',
'SPECIFIC_ATTRIBUTES']:
raise ValueError('Invalid select value')
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/client.py
|
Client.list_tables
|
python
|
def list_tables(self, exclusive_start_table_name=None, limit=None):
    """Invoke the DynamoDB ``ListTables`` action.

    Returns table names for the current account and endpoint; output is
    paginated at up to 100 names per page.

    :param str exclusive_start_table_name: first table name to evaluate;
        pass the ``LastEvaluatedTableName`` of a prior response to fetch
        the next page.
    :param int limit: maximum number of table names to return
        (defaults to 100 server-side).
    :rtype: tornado.concurrent.Future
    """
    candidates = (('ExclusiveStartTableName', exclusive_start_table_name),
                  ('Limit', limit))
    request = {name: value for name, value in candidates if value}
    return self.execute('ListTables', request)
|
Invoke the `ListTables`_ function.
Returns an array of table names associated with the current account
and endpoint. The output from *ListTables* is paginated, with each page
returning a maximum of ``100`` table names.
:param str exclusive_start_table_name: The first table name that this
operation will evaluate. Use the value that was returned for
``LastEvaluatedTableName`` in a previous operation, so that you can
obtain the next page of results.
:param int limit: A maximum number of table names to return. If this
parameter is not specified, the limit is ``100``.
.. _ListTables: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_ListTables.html
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/client.py#L176-L200
| null |
class Client(object):
"""
Asynchronous DynamoDB Client
:keyword str region: AWS region to send requests to
:keyword str access_key: AWS access key. If unspecified, this
defaults to the :envvar:`AWS_ACCESS_KEY_ID` environment
variable and will fall back to using the AWS CLI credentials
file. See :class:`tornado_aws.client.AsyncAWSClient` for
more details.
:keyword str secret_key: AWS secret used to secure API calls.
If unspecified, this defaults to the :envvar:`AWS_SECRET_ACCESS_KEY`
environment variable and will fall back to using the AWS CLI
credentials as described in :class:`tornado_aws.client.AsyncAWSClient`.
:keyword str profile: optional profile to use in AWS API calls.
If unspecified, this defaults to the :envvar:`AWS_DEFAULT_PROFILE`
environment variable or ``default`` if unset.
:keyword str endpoint: DynamoDB endpoint to contact. If unspecified,
the default is determined by the region.
:keyword int max_clients: optional maximum number of HTTP requests
that may be performed in parallel.
:keyword int max_retries: Maximum number of times to retry a request when
if fails under certain conditions. Can also be set with the
:envvar:`DYNAMODB_MAX_RETRIES` environment variable.
:keyword method instrumentation_callback: A method that is invoked with a
list of measurements that were collected during the execution of an
individual action.
:keyword method on_error_callback: A method that is invoked when there is
a request exception that can not automatically be retried or the
maximum number of retries has been exceeded for a request.
Any of the methods invoked in the client can raise the following
exceptions:
- :exc:`sprockets_dynamodb.exceptions.DynamoDBException`
- :exc:`sprockets_dynamodb.exceptions.ConfigNotFound`
- :exc:`sprockets_dynamodb.exceptions.NoCredentialsError`
- :exc:`sprockets_dynamodb.exceptions.NoProfileError`
- :exc:`sprockets_dynamodb.exceptions.TimeoutException`
- :exc:`sprockets_dynamodb.exceptions.RequestException`
- :exc:`sprockets_dynamodb.exceptions.InternalFailure`
- :exc:`sprockets_dynamodb.exceptions.LimitExceeded`
- :exc:`sprockets_dynamodb.exceptions.MissingParameter`
- :exc:`sprockets_dynamodb.exceptions.OptInRequired`
- :exc:`sprockets_dynamodb.exceptions.ResourceInUse`
- :exc:`sprockets_dynamodb.exceptions.RequestExpired`
- :exc:`sprockets_dynamodb.exceptions.ServiceUnavailable`
- :exc:`sprockets_dynamodb.exceptions.ValidationException`
Create an instance of this class to interact with a DynamoDB
server. A :class:`tornado_aws.client.AsyncAWSClient` instance
implements the AWS API wrapping and this class provides the
DynamoDB specifics.
"""
DEFAULT_MAX_RETRIES = 3
def __init__(self, **kwargs):
    """Create a new asynchronous DynamoDB client.

    See the class docstring for the supported keyword arguments.
    """
    self.logger = LOGGER.getChild(self.__class__.__name__)
    # Environment endpoint override takes effect only when the caller did
    # not pass one explicitly (setdefault).
    if os.environ.get('DYNAMODB_ENDPOINT', None):
        kwargs.setdefault('endpoint', os.environ['DYNAMODB_ENDPOINT'])
    self._client = tornado_aws.AsyncAWSClient('dynamodb', **kwargs)
    self._ioloop = kwargs.get('io_loop', ioloop.IOLoop.current())
    # Bug fix: environment variable values are always strings, so a
    # DYNAMODB_MAX_RETRIES sourced from the environment would later make
    # ``range(1, self._max_retries + 1)`` in execute() raise TypeError.
    # Coerce to int (fails fast on a non-numeric value).
    self._max_retries = int(
        kwargs.get('max_retries',
                   os.environ.get('DYNAMODB_MAX_RETRIES',
                                  self.DEFAULT_MAX_RETRIES)))
    self._instrumentation_callback = kwargs.get('instrumentation_callback')
    self._on_error = kwargs.get('on_error_callback')
def create_table(self, table_definition):
    """Invoke the DynamoDB ``CreateTable`` action.

    :param dict table_definition: description of the table to create,
        per the `CreateTable`_ API documentation
    :rtype: tornado.concurrent.Future

    .. _CreateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_CreateTable.html
    """
    # The definition dict already matches the wire format; send it as-is.
    return self.execute('CreateTable', table_definition)
def update_table(self, table_definition):
    """Invoke the DynamoDB ``UpdateTable`` action (not yet implemented).

    UpdateTable modifies provisioned throughput, global secondary
    indexes, or Streams settings for a table; only one such change may
    be made per call. The operation is asynchronous server-side: the
    table moves from ``ACTIVE`` to ``UPDATING`` and back, and no second
    *UpdateTable* may be issued while it is ``UPDATING``.

    :param dict table_definition: table changes per the `UpdateTable`_
        API documentation
    :rtype: tornado.concurrent.Future
    :raises NotImplementedError: always, in this client version

    .. _UpdateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateTable.html
    """
    raise NotImplementedError
def delete_table(self, table_name):
    """Invoke the DynamoDB ``DeleteTable`` action.

    Deletes the table and all of its items; the table stays in the
    ``DELETING`` state until DynamoDB finishes. Deleting a table that is
    ``CREATING`` or ``UPDATING`` raises
    :exc:`~sprockets_dynamodb.exceptions.ResourceInUse`; a missing table
    raises :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`; a
    table already ``DELETING`` returns without error.

    :param str table_name: name of the table to delete
    :rtype: tornado.concurrent.Future

    .. _DeleteTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteTable.html
    """
    request = {'TableName': table_name}
    return self.execute('DeleteTable', request)
def describe_table(self, table_name):
    """Invoke the DynamoDB ``DescribeTable`` action.

    :param str table_name: name of the table to describe
    :rtype: tornado.concurrent.Future

    .. _DescribeTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DescribeTable.html
    """
    request = {'TableName': table_name}
    return self.execute('DescribeTable', request)
def put_item(self, table_name, item,
             condition_expression=None,
             expression_attribute_names=None,
             expression_attribute_values=None,
             return_consumed_capacity=None,
             return_item_collection_metrics=None,
             return_values=None):
    """Invoke the DynamoDB ``PutItem`` action.

    Creates a new item or completely replaces an existing item with the
    same primary key. A conditional put can add only when no item exists,
    or replace only when attribute conditions hold.

    :param str table_name: table to put the item to
    :param dict item: attribute name/value pairs; must include every
        primary-key attribute (partition key, plus sort key for
        composite keys). Marshalled before sending.
    :param str condition_expression: condition that must be satisfied
        for the conditional put to succeed
    :param dict expression_attribute_names: substitution tokens for
        attribute names used in expressions
    :param dict expression_attribute_values: values substituted into
        expressions
    :param str return_consumed_capacity: level of consumed-capacity
        detail (``INDEXES``, ``TOTAL`` or ``NONE``)
    :param str return_item_collection_metrics: whether item collection
        metrics are returned (``SIZE`` or ``NONE``)
    :param str return_values: return item attributes as they appeared
        before the put (e.g. ``ALL_OLD``)
    :rtype: tornado.concurrent.Future
    :raises ValueError: for invalid return_* settings

    .. _PutItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_PutItem.html
    """
    payload = {'TableName': table_name, 'Item': utils.marshall(item)}
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        # NOTE(review): unlike update_item/delete_item/query, the values
        # are NOT passed through utils.marshall here, so callers must
        # pre-marshall them. Left unchanged to avoid breaking existing
        # callers - confirm whether marshalling was intended.
        payload['ExpressionAttributeValues'] = expression_attribute_values
    if return_consumed_capacity:
        # Consistency fix: every sibling method validates this setting
        # before sending; put_item previously did not.
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        # Bug fix: previously hard-coded to 'SIZE' regardless of the
        # value passed in; validate and honor the caller's setting.
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('PutItem', payload)
def get_item(self, table_name, key_dict,
             consistent_read=False,
             expression_attribute_names=None,
             projection_expression=None,
             return_consumed_capacity=None):
    """Invoke the DynamoDB ``GetItem`` action.

    :param str table_name: table to retrieve the item from
    :param dict key_dict: primary key of the item; marshalled before
        sending
    :param bool consistent_read: use strongly consistent reads when
        :data:`True`, eventually consistent otherwise
    :param dict expression_attribute_names: substitution tokens for
        attribute names used in expressions
    :param str projection_expression: comma-separated attributes to
        retrieve; all attributes are returned when omitted
    :param str return_consumed_capacity: level of consumed-capacity
        detail (``INDEXES``, ``TOTAL`` or ``NONE``)
    :rtype: tornado.concurrent.Future
    :raises ValueError: for an invalid return_consumed_capacity setting

    .. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_GetItem.html
    """
    payload = {
        'TableName': table_name,
        'Key': utils.marshall(key_dict),
        'ConsistentRead': consistent_read,
    }
    # Optional pass-through fields that need no transformation.
    for field, value in (
            ('ExpressionAttributeNames', expression_attribute_names),
            ('ProjectionExpression', projection_expression)):
        if value:
            payload[field] = value
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('GetItem', payload)
def update_item(self, table_name, key_dict,
                condition_expression=None,
                update_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=None):
    """Invoke the DynamoDB ``UpdateItem`` action.

    Edits an existing item's attributes, or adds the item when it does
    not exist. Supports conditional updates via
    ``condition_expression``.

    :param str table_name: table containing the item to update
    :param dict key_dict: full primary key of the item (partition key,
        plus sort key for composite keys); marshalled before sending
    :param str condition_expression: condition that must be satisfied
        for the conditional update to succeed
    :param str update_expression: attributes to update, the action to
        perform, and their new values
    :param dict expression_attribute_names: substitution tokens for
        attribute names used in expressions
    :param dict expression_attribute_values: values substituted into
        expressions; marshalled before sending
    :param str return_consumed_capacity: level of consumed-capacity
        detail (``INDEXES``, ``TOTAL`` or ``NONE``)
    :param str return_item_collection_metrics: whether item collection
        metrics are returned (``SIZE`` or ``NONE``)
    :param str return_values: return item attributes as they appeared
        before or after the update
    :rtype: tornado.concurrent.Future
    :raises ValueError: for invalid return_* settings

    .. _UpdateItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateItem.html
    """
    # NOTE: 'UpdateExpression' is always present in the request, even
    # when update_expression is None - preserved from the original.
    request = {
        'TableName': table_name,
        'Key': utils.marshall(key_dict),
        'UpdateExpression': update_expression,
    }
    for field, value in (
            ('ConditionExpression', condition_expression),
            ('ExpressionAttributeNames', expression_attribute_names)):
        if value:
            request[field] = value
    if expression_attribute_values:
        request['ExpressionAttributeValues'] = utils.marshall(
            expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        request['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        request['ReturnValues'] = return_values
    return self.execute('UpdateItem', request)
def delete_item(self, table_name, key_dict,
                condition_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=False):
    """Invoke the DynamoDB ``DeleteItem`` action.

    Deletes a single item by primary key, optionally as a conditional
    delete that only proceeds when the item exists or has an expected
    attribute value.

    :param str table_name: table to delete the item from
    :param dict key_dict: full primary key of the item (partition key,
        plus sort key for composite keys); marshalled before sending
    :param str condition_expression: condition that must be satisfied
        for the conditional delete to succeed
    :param dict expression_attribute_names: substitution tokens for
        attribute names used in expressions
    :param dict expression_attribute_values: values substituted into
        expressions; marshalled before sending
    :param str return_consumed_capacity: level of consumed-capacity
        detail (``INDEXES``, ``TOTAL`` or ``NONE``)
    :param str return_item_collection_metrics: whether item collection
        metrics are returned (``SIZE`` or ``NONE``)
    :param str return_values: return item attributes as they appeared
        before deletion when set
    :rtype: tornado.concurrent.Future
    :raises ValueError: for invalid return_* settings

    .. _DeleteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteItem.html
    """
    request = {'TableName': table_name, 'Key': utils.marshall(key_dict)}
    for field, value in (
            ('ConditionExpression', condition_expression),
            ('ExpressionAttributeNames', expression_attribute_names)):
        if value:
            request[field] = value
    if expression_attribute_values:
        request['ExpressionAttributeValues'] = utils.marshall(
            expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        request['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        request['ReturnValues'] = return_values
    return self.execute('DeleteItem', request)
def batch_get_item(self):
    """Invoke the DynamoDB ``BatchGetItem`` action (not yet implemented).

    :raises NotImplementedError: always, in this client version

    .. _BatchGetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchGetItem.html
    """
    raise NotImplementedError
def batch_write_item(self):
    """Invoke the DynamoDB ``BatchWriteItem`` action (not yet implemented).

    :raises NotImplementedError: always, in this client version

    .. _BatchWriteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchWriteItem.html
    """
    raise NotImplementedError
def query(self, table_name,
          index_name=None,
          consistent_read=None,
          key_condition_expression=None,
          filter_expression=None,
          expression_attribute_names=None,
          expression_attribute_values=None,
          projection_expression=None,
          select=None,
          exclusive_start_key=None,
          limit=None,
          scan_index_forward=True,
          return_consumed_capacity=None):
    """Invoke the DynamoDB ``Query`` action.

    Uses the primary key of a table or a secondary index to directly
    access matching items.

    :param str table_name: table containing the requested items
    :param str index_name: local or global secondary index to query
    :param bool consistent_read: use strongly consistent reads when
        :data:`True` (not supported on global secondary indexes)
    :param str key_condition_expression: key condition for items to
        retrieve; must include a partition-key equality test and may
        add a sort-key comparison
    :param str filter_expression: conditions applied server-side after
        the query but before results are returned; does not reduce
        consumed read capacity
    :param dict expression_attribute_names: substitution tokens for
        attribute names used in expressions
    :param dict expression_attribute_values: values substituted into
        expressions; marshalled before sending
    :param str projection_expression: comma-separated attributes to
        retrieve
    :param str select: attributes to return (``ALL_ATTRIBUTES``,
        ``ALL_PROJECTED_ATTRIBUTES``, ``COUNT`` or
        ``SPECIFIC_ATTRIBUTES``)
    :param dict exclusive_start_key: primary key of the first item to
        evaluate; use the prior response's ``LastEvaluatedKey`` to page
    :param int limit: maximum number of items to evaluate before the
        operation stops and returns a ``LastEvaluatedKey``
    :param bool scan_index_forward: traverse the index in ascending
        order when :data:`True` (default), descending otherwise
    :param str return_consumed_capacity: level of consumed-capacity
        detail (``INDEXES``, ``TOTAL`` or ``NONE``)
    :rtype: tornado.concurrent.Future
    :raises ValueError: for invalid select / return_consumed_capacity

    .. _Query: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_Query.html
    """
    request = {'TableName': table_name,
               'ScanIndexForward': scan_index_forward}
    # ConsistentRead distinguishes False from "not supplied".
    if consistent_read is not None:
        request['ConsistentRead'] = consistent_read
    # Optional pass-through fields that need no transformation.
    for field, value in (
            ('IndexName', index_name),
            ('KeyConditionExpression', key_condition_expression),
            ('FilterExpression', filter_expression),
            ('ExpressionAttributeNames', expression_attribute_names),
            ('ProjectionExpression', projection_expression),
            ('Limit', limit)):
        if value:
            request[field] = value
    if expression_attribute_values:
        request['ExpressionAttributeValues'] = utils.marshall(
            expression_attribute_values)
    if select:
        _validate_select(select)
        request['Select'] = select
    if exclusive_start_key:
        request['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Query', request)
def scan(self,
         table_name,
         index_name=None,
         consistent_read=None,
         projection_expression=None,
         filter_expression=None,
         expression_attribute_names=None,
         expression_attribute_values=None,
         segment=None,
         total_segments=None,
         select=None,
         limit=None,
         exclusive_start_key=None,
         return_consumed_capacity=None):
    """Invoke the DynamoDB ``Scan`` action.

    Returns items by reading every item in a table or secondary index.
    When the scanned data exceeds the 1 MB response limit, the result
    carries a ``LastEvaluatedKey`` for continuing in a follow-up call.
    For parallel scans, supply ``segment`` and ``total_segments``. Scans
    are eventually consistent unless ``consistent_read`` is ``True``.

    :param str table_name: table to scan
    :param str index_name: secondary index to scan
    :param bool consistent_read: use strongly consistent reads when
        :data:`True`
    :param str projection_expression: comma-separated attributes to
        retrieve
    :param str filter_expression: conditions applied server-side after
        the scan but before results are returned
    :param dict expression_attribute_names: substitution tokens for
        attribute names used in expressions
    :param dict expression_attribute_values: values substituted into
        expressions; marshalled before sending
    :param int segment: zero-indexed segment number for a parallel scan
    :param int total_segments: total number of parallel-scan segments
    :param str select: attributes to return (``ALL_ATTRIBUTES``,
        ``ALL_PROJECTED_ATTRIBUTES``, ``COUNT`` or
        ``SPECIFIC_ATTRIBUTES``)
    :param int limit: maximum number of items to evaluate
    :param dict exclusive_start_key: primary key of the first item to
        evaluate; use the prior response's ``LastEvaluatedKey`` to page
    :param str return_consumed_capacity: level of consumed-capacity
        detail (``INDEXES``, ``TOTAL`` or ``NONE``)
    :rtype: tornado.concurrent.Future
    :raises ValueError: for invalid select / return_consumed_capacity

    .. _Scan: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_Scan.html
    """
    payload = {'TableName': table_name}
    if index_name:
        payload['IndexName'] = index_name
    if consistent_read is not None:
        payload['ConsistentRead'] = consistent_read
    if filter_expression:
        payload['FilterExpression'] = filter_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if projection_expression:
        payload['ProjectionExpression'] = projection_expression
    # Bug fix: parallel-scan segments are zero-indexed, so the previous
    # truthiness test (``if segment:``) silently dropped segment 0;
    # compare against None explicitly.
    if segment is not None:
        payload['Segment'] = segment
    if total_segments:
        payload['TotalSegments'] = total_segments
    if select:
        _validate_select(select)
        payload['Select'] = select
    if exclusive_start_key:
        payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if limit:
        payload['Limit'] = limit
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Scan', payload)
@gen.coroutine
def execute(self, action, parameters):
    """
    Execute a DynamoDB action with the given parameters. The method will
    retry requests that failed due to OS level errors or when being
    throttled by DynamoDB.

    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :rtype: tornado.concurrent.Future

    This method creates a future that will resolve to the result
    of calling the specified DynamoDB function. It does its best
    to unwrap the response from the function to make life a little
    easier for you. It does this for the ``GetItem`` and ``Query``
    functions currently.

    :raises:
        :exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
        :exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
        :exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
        :exc:`~sprockets_dynamodb.exceptions.NoProfileError`
        :exc:`~sprockets_dynamodb.exceptions.TimeoutException`
        :exc:`~sprockets_dynamodb.exceptions.RequestException`
        :exc:`~sprockets_dynamodb.exceptions.InternalFailure`
        :exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
        :exc:`~sprockets_dynamodb.exceptions.MissingParameter`
        :exc:`~sprockets_dynamodb.exceptions.OptInRequired`
        :exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
        :exc:`~sprockets_dynamodb.exceptions.RequestExpired`
        :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
        :exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
        :exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
        :exc:`~sprockets_dynamodb.exceptions.ValidationException`

    """
    # Bounded deque: keeps at most one Measurement per attempt, so the
    # instrumentation callback only ever sees the most recent attempts.
    measurements = collections.deque([], self._max_retries)
    for attempt in range(1, self._max_retries + 1):
        try:
            result = yield self._execute(
                action, parameters, attempt, measurements)
        except (exceptions.InternalServerError,
                exceptions.RequestException,
                exceptions.ThrottlingException,
                exceptions.ThroughputExceeded,
                exceptions.ServiceUnavailable) as error:
            # These error classes are considered transient/retryable;
            # only give up (and report) after the final attempt.
            if attempt == self._max_retries:
                if self._instrumentation_callback:
                    self._instrumentation_callback(measurements)
                self._on_exception(error)
            # Exponential backoff between attempts.
            duration = self._sleep_duration(attempt)
            self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
                                error, attempt, duration)
            yield gen.sleep(duration)
        except exceptions.DynamoDBException as error:
            # Any other DynamoDB error is not retryable: report and
            # delegate to the error callback (or re-raise) immediately.
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self._on_exception(error)
        else:
            # Success path: emit measurements, then deliver the
            # (possibly unwrapped) result via tornado's gen.Return.
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self.logger.debug('%s result: %r', action, result)
            raise gen.Return(_unwrap_result(action, result))
def set_error_callback(self, callback):
    """Register *callback* for unrecoverable action-execution errors.

    When set, the callback is invoked instead of raising when a request
    encounters an error that can not be retried.

    :param method callback: The method to invoke

    """
    self._on_error = callback
    self.logger.debug('Setting error callback: %r', callback)
def set_instrumentation_callback(self, callback):
    """Register *callback* to receive request measurements.

    The callback is invoked with the accumulated measurements once a
    request has finished (successfully or not).

    :param method callback: The method to invoke

    """
    self._instrumentation_callback = callback
    self.logger.debug('Setting instrumentation callback: %r', callback)
def _execute(self, action, parameters, attempt, measurements):
    """Invoke a DynamoDB action

    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :param int attempt: Which attempt number this is
    :param list measurements: A list for accumulating request measurements
    :rtype: tornado.concurrent.Future

    """
    # The returned future is resolved (or failed) by _on_response once
    # the underlying HTTP fetch completes.
    future = concurrent.Future()
    start = time.time()

    def handle_response(request):
        """Invoked by the IOLoop when fetch has a response to process.

        :param tornado.concurrent.Future request: The request future

        """
        # TableName is only used for instrumentation labels; batch-style
        # actions without a single table fall back to 'Unknown'.
        self._on_response(
            action, parameters.get('TableName', 'Unknown'), attempt,
            start, request, future, measurements)

    # DynamoDB's HTTP protocol: every action POSTs to '/' with the
    # action name carried in the x-amz-target header.
    ioloop.IOLoop.current().add_future(self._client.fetch(
        'POST', '/',
        body=json.dumps(parameters).encode('utf-8'),
        headers={
            'x-amz-target': 'DynamoDB_20120810.{}'.format(action),
            'Content-Type': 'application/x-amz-json-1.0',
        }), handle_response)
    return future
def _on_exception(self, error):
"""Handle exceptions that can not be retried.
:param error: The exception that was raised
:type error: sprockets_dynamodb.exceptions.DynamoDBException
"""
if not self._on_error:
raise error
self._on_error(error)
def _on_response(self, action, table, attempt, start, response, future,
                 measurements):
    """Invoked when the HTTP request to the DynamoDB has returned and
    is responsible for setting the future result or exception based upon
    the HTTP response provided.

    :param str action: The action that was taken
    :param str table: The table name the action was made against
    :param int attempt: The attempt number for the action
    :param float start: When the request was submitted
    :param tornado.concurrent.Future response: The HTTP request future
    :param tornado.concurrent.Future future: The action execution future
    :param list measurements: The measurement accumulator

    """
    self.logger.debug('%s on %s request #%i = %r',
                      action, table, attempt, response)
    now, exception = time.time(), None
    # Translate the many failure modes of the AWS/tornado stack into
    # this package's exception hierarchy. Order matters: specific AWS
    # config/credential errors first, then the generic AWSError, then
    # transport-level failures, then tornado HTTP errors.
    try:
        future.set_result(self._process_response(response))
    except aws_exceptions.ConfigNotFound as error:
        exception = exceptions.ConfigNotFound(str(error))
    except aws_exceptions.ConfigParserError as error:
        exception = exceptions.ConfigParserError(str(error))
    except aws_exceptions.NoCredentialsError as error:
        exception = exceptions.NoCredentialsError(str(error))
    except aws_exceptions.NoProfileError as error:
        exception = exceptions.NoProfileError(str(error))
    except aws_exceptions.AWSError as error:
        exception = exceptions.DynamoDBException(error)
    except (ConnectionError, ConnectionResetError, OSError,
            aws_exceptions.RequestException, ssl.SSLError,
            _select.error, ssl.socket_error, socket.gaierror) as error:
        # Socket/transport level failures are retryable RequestExceptions.
        exception = exceptions.RequestException(str(error))
    except TimeoutError:
        exception = exceptions.TimeoutException()
    except httpclient.HTTPError as error:
        # Tornado uses HTTP 599 for client-side connect/request timeouts.
        if error.code == 599:
            exception = exceptions.TimeoutException()
        else:
            exception = exceptions.RequestException(
                getattr(getattr(error, 'response', error),
                        'body', str(error.code)))
    except Exception as error:
        exception = error
    if exception:
        future.set_exception(exception)
    # max(now, start) guards against a negative duration if the clock
    # moved backwards between submission and completion.
    measurements.append(
        Measurement(now, action, table, attempt, max(now, start) - start,
                    exception.__class__.__name__
                    if exception else exception))
@staticmethod
def _process_response(response):
    """Process the raw AWS response, returning either the mapped exception
    or deserialized response.

    :param tornado.concurrent.Future response: The request future
    :rtype: dict or list
    :raises: sprockets_dynamodb.exceptions.DynamoDBException

    """
    failure = response.exception()
    if failure:
        # AWS errors carry a (code, {'type': ..., 'message': ...}) pair;
        # map known types onto this package's exception classes.
        if isinstance(failure, aws_exceptions.AWSError):
            details = failure.args[1]
            mapped = exceptions.MAP.get(details['type'])
            if mapped is not None:
                raise mapped(details['message'])
        raise failure
    http_response = response.result()
    if not (http_response and http_response.body):
        raise exceptions.DynamoDBException('empty response')
    return json.loads(http_response.body.decode('utf-8'))
@staticmethod
def _sleep_duration(attempt):
"""Calculates how long to sleep between exceptions. Returns a value
in seconds.
:param int attempt: The attempt number
:rtype: float
"""
return (float(2 ** attempt) * 100) / 1000
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/client.py
|
Client.put_item
|
python
|
def put_item(self, table_name, item,
condition_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
return_values=None):
payload = {'TableName': table_name, 'Item': utils.marshall(item)}
if condition_expression:
payload['ConditionExpression'] = condition_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = expression_attribute_values
if return_consumed_capacity:
payload['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics:
payload['ReturnItemCollectionMetrics'] = 'SIZE'
if return_values:
_validate_return_values(return_values)
payload['ReturnValues'] = return_values
return self.execute('PutItem', payload)
|
Invoke the `PutItem`_ function, creating a new item or replacing an
old item with a new item. If an item that has the same primary key as
the new item already exists in the specified table, the new item
completely replaces the existing item. You can perform a conditional
put operation (add a new item if one with the specified primary key
doesn't exist), or replace an existing item if it has certain attribute
values.
For more information about using this API, see Working with Items in
the Amazon DynamoDB Developer Guide.
:param str table_name: The table to put the item to
:param dict item: A map of attribute name/value pairs, one for each
attribute. Only the primary key attributes are required; you can
optionally provide other attribute name-value pairs for the item.
You must provide all of the attributes for the primary key. For
example, with a simple primary key, you only need to provide a
value for the partition key. For a composite primary key, you must
provide both values for both the partition key and the sort key.
If you specify any attributes that are part of an index key, then
the data types for those attributes must match those of the schema
in the table's attribute definition.
:param str condition_expression: A condition that must be satisfied in
order for a conditional *PutItem* operation to succeed. See the
`AWS documentation for ConditionExpression <http://docs.aws.amazon.
com/amazondynamodb/latest/APIReference/API_PutItem.html#DDB-Put
Item-request-ConditionExpression>`_ for more information.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression. See the `AWS documentation
for ExpressionAttributeNames <http://docs.aws.amazon.com/amazon
dynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-
ExpressionAttributeNames>`_ for more information.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression. See the `AWS documentation
for ExpressionAttributeValues <http://docs.aws.amazon.com/amazon
dynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-
ExpressionAttributeValues>`_ for more information.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response. Should be ``None`` or one of ``INDEXES`` or ``TOTAL``
:param str return_item_collection_metrics: Determines whether item
collection metrics are returned.
:param str return_values: Use ``ReturnValues`` if you want to get the
item attributes as they appeared before they were updated with the
``PutItem`` request.
:rtype: tornado.concurrent.Future
.. _PutItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_PutItem.html
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/client.py#L202-L276
|
[
"def marshall(values):\n \"\"\"\n Marshall a `dict` into something DynamoDB likes.\n\n :param dict values: The values to marshall\n :rtype: dict\n :raises ValueError: if an unsupported type is encountered\n\n Return the values in a nested dict structure that is required for\n writing the values to DynamoDB.\n\n \"\"\"\n serialized = {}\n for key in values:\n serialized[key] = _marshall_value(values[key])\n return serialized\n",
"def _validate_return_values(value):\n if value not in ['NONE', 'ALL_NEW', 'ALL_OLD',\n 'UPDATED_NEW', 'UPDATED_OLD']:\n raise ValueError('Invalid return_values value')\n"
] |
class Client(object):
"""
Asynchronous DynamoDB Client
:keyword str region: AWS region to send requests to
:keyword str access_key: AWS access key. If unspecified, this
defaults to the :envvar:`AWS_ACCESS_KEY_ID` environment
variable and will fall back to using the AWS CLI credentials
file. See :class:`tornado_aws.client.AsyncAWSClient` for
more details.
:keyword str secret_key: AWS secret used to secure API calls.
If unspecified, this defaults to the :envvar:`AWS_SECRET_ACCESS_KEY`
environment variable and will fall back to using the AWS CLI
credentials as described in :class:`tornado_aws.client.AsyncAWSClient`.
:keyword str profile: optional profile to use in AWS API calls.
If unspecified, this defaults to the :envvar:`AWS_DEFAULT_PROFILE`
environment variable or ``default`` if unset.
:keyword str endpoint: DynamoDB endpoint to contact. If unspecified,
the default is determined by the region.
:keyword int max_clients: optional maximum number of HTTP requests
that may be performed in parallel.
:keyword int max_retries: Maximum number of times to retry a request when
it fails under certain conditions. Can also be set with the
:envvar:`DYNAMODB_MAX_RETRIES` environment variable.
:keyword method instrumentation_callback: A method that is invoked with a
list of measurements that were collected during the execution of an
individual action.
:keyword method on_error_callback: A method that is invoked when there is
a request exception that can not automatically be retried or the
maximum number of retries has been exceeded for a request.
Any of the methods invoked in the client can raise the following
exceptions:
- :exc:`sprockets_dynamodb.exceptions.DynamoDBException`
- :exc:`sprockets_dynamodb.exceptions.ConfigNotFound`
- :exc:`sprockets_dynamodb.exceptions.NoCredentialsError`
- :exc:`sprockets_dynamodb.exceptions.NoProfileError`
- :exc:`sprockets_dynamodb.exceptions.TimeoutException`
- :exc:`sprockets_dynamodb.exceptions.RequestException`
- :exc:`sprockets_dynamodb.exceptions.InternalFailure`
- :exc:`sprockets_dynamodb.exceptions.LimitExceeded`
- :exc:`sprockets_dynamodb.exceptions.MissingParameter`
- :exc:`sprockets_dynamodb.exceptions.OptInRequired`
- :exc:`sprockets_dynamodb.exceptions.ResourceInUse`
- :exc:`sprockets_dynamodb.exceptions.RequestExpired`
- :exc:`sprockets_dynamodb.exceptions.ServiceUnavailable`
- :exc:`sprockets_dynamodb.exceptions.ValidationException`
Create an instance of this class to interact with a DynamoDB
server. A :class:`tornado_aws.client.AsyncAWSClient` instance
implements the AWS API wrapping and this class provides the
DynamoDB specifics.
"""
DEFAULT_MAX_RETRIES = 3
def __init__(self, **kwargs):
    """Create the client, configuring the wrapped AWS HTTP client.

    See the class docstring for the supported keyword arguments.

    """
    self.logger = LOGGER.getChild(self.__class__.__name__)
    if os.environ.get('DYNAMODB_ENDPOINT', None):
        kwargs.setdefault('endpoint', os.environ['DYNAMODB_ENDPOINT'])
    self._client = tornado_aws.AsyncAWSClient('dynamodb', **kwargs)
    self._ioloop = kwargs.get('io_loop', ioloop.IOLoop.current())
    # BUG FIX: the DYNAMODB_MAX_RETRIES environment variable is a
    # string; it was previously stored as-is and later used in integer
    # arithmetic (range(), deque maxlen) in execute(), raising
    # TypeError. Coerce to int so either source works.
    self._max_retries = int(kwargs.get(
        'max_retries', os.environ.get(
            'DYNAMODB_MAX_RETRIES', self.DEFAULT_MAX_RETRIES)))
    self._instrumentation_callback = kwargs.get('instrumentation_callback')
    self._on_error = kwargs.get('on_error_callback')
def create_table(self, table_definition):
    """
    Invoke the ``CreateTable`` function.

    :param dict table_definition: description of the table to
        create according to `CreateTable`_
    :rtype: tornado.concurrent.Future

    .. _CreateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_CreateTable.html

    """
    # The table definition is already in wire format; pass it through.
    action = 'CreateTable'
    return self.execute(action, table_definition)
def update_table(self, table_definition):
    """
    Modify the provisioned throughput settings, global secondary
    indexes, or DynamoDB Streams settings for a given table.

    Only one of the following operations may be performed at once:
    modifying provisioned throughput, enabling/disabling Streams, or
    removing/creating a global secondary index. *UpdateTable* is
    asynchronous: the table status moves from ``ACTIVE`` to
    ``UPDATING`` and back to ``ACTIVE`` when the update is complete;
    no further *UpdateTable* request may be issued while ``UPDATING``.

    :param dict table_definition: description of the table to
        update according to `UpdateTable`_
    :rtype: tornado.concurrent.Future

    .. _UpdateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateTable.html

    """
    # Not yet implemented by this client.
    raise NotImplementedError
def delete_table(self, table_name):
    """
    Invoke the `DeleteTable`_ function, deleting the table and all of
    its items.

    After the request the table is in the ``DELETING`` state until
    DynamoDB completes the deletion. Deleting an ``ACTIVE`` table
    succeeds; a table in ``CREATING`` or ``UPDATING`` raises
    :exc:`~sprockets_dynamodb.exceptions.ResourceInUse`; a missing
    table raises
    :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`; a table
    already ``DELETING`` returns no error.

    :param str table_name: name of the table to delete.
    :rtype: tornado.concurrent.Future

    .. _DeleteTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteTable.html

    """
    payload = {'TableName': table_name}
    return self.execute('DeleteTable', payload)
def describe_table(self, table_name):
    """
    Invoke the `DescribeTable`_ function.

    :param str table_name: name of the table to describe.
    :rtype: tornado.concurrent.Future

    .. _DescribeTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DescribeTable.html

    """
    payload = {'TableName': table_name}
    return self.execute('DescribeTable', payload)
def list_tables(self, exclusive_start_table_name=None, limit=None):
    """
    Invoke the `ListTables`_ function, returning the table names
    associated with the current account and endpoint. Output is
    paginated, each page returning a maximum of ``100`` names.

    :param str exclusive_start_table_name: The first table name that
        this operation will evaluate; use the value returned for
        ``LastEvaluatedTableName`` in a previous call to obtain the
        next page of results.
    :param int limit: A maximum number of table names to return; when
        unspecified the limit is ``100``.
    :rtype: tornado.concurrent.Future

    .. _ListTables: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_ListTables.html

    """
    # Only include parameters that were actually provided.
    payload = {
        key: value
        for key, value in (
            ('ExclusiveStartTableName', exclusive_start_table_name),
            ('Limit', limit))
        if value
    }
    return self.execute('ListTables', payload)
def get_item(self, table_name, key_dict,
             consistent_read=False,
             expression_attribute_names=None,
             projection_expression=None,
             return_consumed_capacity=None):
    """
    Invoke the `GetItem`_ function, retrieving a single item by its
    primary key.

    :param str table_name: table to retrieve the item from
    :param dict key_dict: key to use for retrieval. This will
        be marshalled for you so a native :class:`dict` works.
    :param bool consistent_read: when :data:`True` the operation uses a
        strongly consistent read; otherwise an eventually consistent
        read is used.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param str projection_expression: comma separated attributes to
        retrieve (scalars, sets, or elements of a JSON document). All
        attributes are returned when omitted; requested attributes not
        found do not appear in the result.
    :param str return_consumed_capacity: level of detail about
        provisioned throughput consumption returned in the response;
        one of ``INDEXES``, ``TOTAL`` or ``NONE``.
    :rtype: tornado.concurrent.Future

    .. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_GetItem.html

    """
    payload = {
        'TableName': table_name,
        'Key': utils.marshall(key_dict),
        'ConsistentRead': consistent_read,
    }
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if projection_expression:
        payload['ProjectionExpression'] = projection_expression
    if return_consumed_capacity:
        # Validate before sending so a bad value fails fast, locally.
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('GetItem', payload)
def update_item(self, table_name, key_dict,
                condition_expression=None,
                update_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=None):
    """Invoke the `UpdateItem`_ function.

    Edits an existing item's attributes, or adds a new item to the
    table if it does not already exist. You can put, delete, or add
    attribute values, and can perform a conditional update on an
    existing item.

    :param str table_name: The name of the table that contains the
        item to update
    :param dict key_dict: key/value pairs defining the primary key of
        the item; all key attributes must be provided (partition key,
        and sort key for composite keys). Marshalled for you.
    :param str condition_expression: A condition that must be
        satisfied in order for the conditional update to succeed.
    :param str update_expression: An expression that defines one or
        more attributes to be updated, the action to be performed on
        them, and new value(s) for them.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param dict expression_attribute_values: One or more values that
        can be substituted in an expression. Marshalled for you.
    :param str return_consumed_capacity: level of detail about
        provisioned throughput consumption returned in the response.
    :param str return_item_collection_metrics: Determines whether item
        collection metrics are returned.
    :param str return_values: request the item attributes as they
        appeared either before or after they were updated.
    :rtype: tornado.concurrent.Future

    .. _UpdateItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateItem.html

    """
    payload = {
        'TableName': table_name,
        'Key': utils.marshall(key_dict),
        'UpdateExpression': update_expression,
    }
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = utils.marshall(
            expression_attribute_values)
    # Validate the enumerated options locally before sending.
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = (
            return_item_collection_metrics)
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('UpdateItem', payload)
def delete_item(self, table_name, key_dict,
                condition_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=False):
    """Invoke the `DeleteItem`_ function, deleting a single item in a
    table by primary key. A conditional delete is performed when
    *condition_expression* is given.

    :param str table_name: The name of the table from which to delete
        the item.
    :param dict key_dict: attribute names/values representing the
        primary key of the item to delete; all key attributes must be
        provided (partition key, and sort key for composite keys).
        Marshalled for you.
    :param str condition_expression: A condition that must be
        satisfied in order for the conditional delete to succeed.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param dict expression_attribute_values: One or more values that
        can be substituted in an expression. Marshalled for you.
    :param str return_consumed_capacity: level of detail about
        provisioned throughput consumption returned in the response.
    :param str return_item_collection_metrics: Determines whether item
        collection metrics are returned.
    :param str return_values: return the item attributes as they
        appeared before they were deleted.
    :rtype: tornado.concurrent.Future

    .. _DeleteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteItem.html

    """
    payload = {
        'TableName': table_name,
        'Key': utils.marshall(key_dict),
    }
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = utils.marshall(
            expression_attribute_values)
    # Validate the enumerated options locally before sending.
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = (
            return_item_collection_metrics)
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('DeleteItem', payload)
def batch_get_item(self):
    """Invoke the `BatchGetItem`_ function.

    .. _BatchGetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchGetItem.html

    """
    # Not yet implemented by this client.
    raise NotImplementedError
def batch_write_item(self):
    """Invoke the `BatchWriteItem`_ function.

    .. _BatchWriteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchWriteItem.html

    """
    # Not yet implemented by this client.
    raise NotImplementedError
def query(self, table_name,
          index_name=None,
          consistent_read=None,
          key_condition_expression=None,
          filter_expression=None,
          expression_attribute_names=None,
          expression_attribute_values=None,
          projection_expression=None,
          select=None,
          exclusive_start_key=None,
          limit=None,
          scan_index_forward=True,
          return_consumed_capacity=None):
    """A `Query`_ operation uses the primary key of a table or a
    secondary index to directly access items from that table or index.

    :param str table_name: The name of the table containing the
        requested items.
    :param str index_name: The name of a secondary index to query
        (local or global).
    :param bool consistent_read: when ``True`` use strongly consistent
        reads; otherwise eventually consistent reads. Strongly
        consistent reads are not supported on global secondary
        indexes and raise
        :exc:`~sprockets_dynamodb.exceptions.ValidationException`.
    :param str key_condition_expression: The condition specifying the
        key value(s) for items to be retrieved. Must perform an
        equality test on a single partition key value and may
        optionally add a comparison on a single sort key value.
    :param str filter_expression: conditions DynamoDB applies after
        the query but before the data is returned; filtering does not
        consume additional read capacity.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param dict expression_attribute_values: One or more values that
        can be substituted in an expression. Marshalled for you.
    :param str projection_expression: attributes to retrieve.
    :param str select: The attributes to be returned in the result;
        one of ``ALL_ATTRIBUTES``, ``ALL_PROJECTED_ATTRIBUTES`` or
        ``COUNT``.
    :param dict exclusive_start_key: The primary key of the first item
        this operation will evaluate; use the ``LastEvaluatedKey``
        value from the previous operation. Marshalled for you.
    :param int limit: The maximum number of items to evaluate (not
        necessarily the number of matching items); pagination
        continues via ``LastEvaluatedKey``.
    :param bool scan_index_forward: ``True`` (default) traverses the
        index in ascending sort-key order, ``False`` in descending
        order.
    :param str return_consumed_capacity: level of detail about
        provisioned throughput consumption returned in the response;
        one of ``INDEXES``, ``TOTAL`` or ``NONE``.
    :rtype: dict

    .. _Query: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_Query.html

    """
    # ScanIndexForward always travels on the wire; everything else is
    # optional and only added when provided.
    payload = {
        'TableName': table_name,
        'ScanIndexForward': scan_index_forward,
    }
    if index_name:
        payload['IndexName'] = index_name
    # consistent_read=False is meaningful, so test against None.
    if consistent_read is not None:
        payload['ConsistentRead'] = consistent_read
    if key_condition_expression:
        payload['KeyConditionExpression'] = key_condition_expression
    if filter_expression:
        payload['FilterExpression'] = filter_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = utils.marshall(
            expression_attribute_values)
    if projection_expression:
        payload['ProjectionExpression'] = projection_expression
    if select:
        _validate_select(select)
        payload['Select'] = select
    if exclusive_start_key:
        payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if limit:
        payload['Limit'] = limit
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Query', payload)
def scan(self,
         table_name,
         index_name=None,
         consistent_read=None,
         projection_expression=None,
         filter_expression=None,
         expression_attribute_names=None,
         expression_attribute_values=None,
         segment=None,
         total_segments=None,
         select=None,
         limit=None,
         exclusive_start_key=None,
         return_consumed_capacity=None):
    """Invoke the `Scan`_ function, reading every item in ``table_name``
    (or in ``index_name`` when given).

    Scanning stops once 1 MB of data has been examined; the response then
    carries a ``LastEvaluatedKey`` that may be fed back through
    ``exclusive_start_key`` to resume. Supplying ``segment`` and
    ``total_segments`` enables parallel scans. Scans are eventually
    consistent unless ``consistent_read`` is set to ``True``.

    :param str table_name: Name of the table (or parent of the index).
    :param str index_name: Optional secondary index to scan.
    :param bool consistent_read: Use strongly consistent reads when True.
    :param str projection_expression: Attributes to return, comma
        separated.
    :param str filter_expression: Post-scan filter applied before results
        are returned.
    :param dict expression_attribute_names: Substitution tokens for
        attribute names in expressions.
    :param dict expression_attribute_values: Values substituted into
        expressions; marshalled internally.
    :param int segment: Segment number for a parallel scan.
    :param int total_segments: Total segments for a parallel scan.
    :param str select: Which attributes are returned; validated before
        use.
    :param int limit: Maximum number of items to evaluate.
    :param dict exclusive_start_key: Key to resume from; marshalled
        internally.
    :param str return_consumed_capacity: Level of consumed-capacity
        detail; validated before use.
    :rtype: dict

    .. _Scan: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_Scan.html
    """
    payload = {'TableName': table_name}
    if index_name:
        payload.update(IndexName=index_name)
    # ``False`` is a meaningful value here, so test against None
    if consistent_read is not None:
        payload.update(ConsistentRead=consistent_read)
    if filter_expression:
        payload.update(FilterExpression=filter_expression)
    if expression_attribute_names:
        payload.update(
            ExpressionAttributeNames=expression_attribute_names)
    if expression_attribute_values:
        payload.update(
            ExpressionAttributeValues=utils.marshall(
                expression_attribute_values))
    if projection_expression:
        payload.update(ProjectionExpression=projection_expression)
    if segment:
        payload.update(Segment=segment)
    if total_segments:
        payload.update(TotalSegments=total_segments)
    if select:
        _validate_select(select)
        payload.update(Select=select)
    if exclusive_start_key:
        payload.update(
            ExclusiveStartKey=utils.marshall(exclusive_start_key))
    if limit:
        payload.update(Limit=limit)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload.update(ReturnConsumedCapacity=return_consumed_capacity)
    return self.execute('Scan', payload)
@gen.coroutine
def execute(self, action, parameters):
    """
    Execute a DynamoDB action with the given parameters. The method will
    retry requests that failed due to OS level errors or when being
    throttled by DynamoDB.

    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :rtype: tornado.concurrent.Future

    This method creates a future that will resolve to the result
    of calling the specified DynamoDB function. It does it's best
    to unwrap the response from the function to make life a little
    easier for you. It does this for the ``GetItem`` and ``Query``
    functions currently.

    :raises:
        :exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
        :exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
        :exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
        :exc:`~sprockets_dynamodb.exceptions.NoProfileError`
        :exc:`~sprockets_dynamodb.exceptions.TimeoutException`
        :exc:`~sprockets_dynamodb.exceptions.RequestException`
        :exc:`~sprockets_dynamodb.exceptions.InternalFailure`
        :exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
        :exc:`~sprockets_dynamodb.exceptions.MissingParameter`
        :exc:`~sprockets_dynamodb.exceptions.OptInRequired`
        :exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
        :exc:`~sprockets_dynamodb.exceptions.RequestExpired`
        :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
        :exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
        :exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
        :exc:`~sprockets_dynamodb.exceptions.ValidationException`
    """
    # Bounded deque: keeps at most one measurement per allowed attempt
    measurements = collections.deque([], self._max_retries)
    for attempt in range(1, self._max_retries + 1):
        try:
            result = yield self._execute(
                action, parameters, attempt, measurements)
        except (exceptions.InternalServerError,
                exceptions.RequestException,
                exceptions.ThrottlingException,
                exceptions.ThroughputExceeded,
                exceptions.ServiceUnavailable) as error:
            # Retryable failures: once the retry budget is spent, flush
            # measurements and hand the error to _on_exception (which
            # raises unless an error callback was registered).
            if attempt == self._max_retries:
                if self._instrumentation_callback:
                    self._instrumentation_callback(measurements)
                self._on_exception(error)
            # Otherwise back off exponentially before the next attempt
            duration = self._sleep_duration(attempt)
            self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
                                error, attempt, duration)
            yield gen.sleep(duration)
        except exceptions.DynamoDBException as error:
            # Non-retryable DynamoDB error: report immediately
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self._on_exception(error)
        else:
            # Success: flush measurements and return the unwrapped result
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self.logger.debug('%s result: %r', action, result)
            raise gen.Return(_unwrap_result(action, result))
def set_error_callback(self, callback):
    """Register a method to be invoked when an action fails with an
    unrecoverable error, instead of raising the exception.

    :param method callback: The method to invoke

    """
    self._on_error = callback
    self.logger.debug('Setting error callback: %r', callback)
def set_instrumentation_callback(self, callback):
    """Register a method to be invoked with the accumulated request
    measurements once an action has finished executing.

    :param method callback: The method to invoke

    """
    self._instrumentation_callback = callback
    self.logger.debug('Setting instrumentation callback: %r', callback)
def _execute(self, action, parameters, attempt, measurements):
    """Invoke a DynamoDB action

    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :param int attempt: Which attempt number this is
    :param list measurements: A list for accumulating request measurements
    :rtype: tornado.concurrent.Future

    """
    future = concurrent.Future()
    start = time.time()

    def handle_response(request):
        """Invoked by the IOLoop when fetch has a response to process.

        :param tornado.concurrent.Future request: The request future

        """
        # Delegate result/exception handling (and measurement capture)
        # to _on_response, which resolves ``future``.
        self._on_response(
            action, parameters.get('TableName', 'Unknown'), attempt,
            start, request, future, measurements)

    # Submit the signed HTTP request; DynamoDB dispatches on the
    # x-amz-target header rather than the URL path.
    ioloop.IOLoop.current().add_future(self._client.fetch(
        'POST', '/',
        body=json.dumps(parameters).encode('utf-8'),
        headers={
            'x-amz-target': 'DynamoDB_20120810.{}'.format(action),
            'Content-Type': 'application/x-amz-json-1.0',
        }), handle_response)
    return future
def _on_exception(self, error):
    """Handle exceptions that can not be retried.

    If an error callback was registered via
    :meth:`set_error_callback`, hand the exception to it; otherwise
    re-raise.

    :param error: The exception that was raised
    :type error: sprockets_dynamodb.exceptions.DynamoDBException

    """
    if self._on_error:
        self._on_error(error)
    else:
        raise error
def _on_response(self, action, table, attempt, start, response, future,
                 measurements):
    """Invoked when the HTTP request to the DynamoDB has returned and
    is responsible for setting the future result or exception based upon
    the HTTP response provided.

    :param str action: The action that was taken
    :param str table: The table name the action was made against
    :param int attempt: The attempt number for the action
    :param float start: When the request was submitted
    :param tornado.concurrent.Future response: The HTTP request future
    :param tornado.concurrent.Future future: The action execution future
    :param list measurements: The measurement accumulator

    """
    self.logger.debug('%s on %s request #%i = %r',
                      action, table, attempt, response)
    now, exception = time.time(), None
    try:
        future.set_result(self._process_response(response))
    # Translate tornado-aws configuration/credential errors into this
    # package's exception hierarchy so callers catch a single family.
    except aws_exceptions.ConfigNotFound as error:
        exception = exceptions.ConfigNotFound(str(error))
    except aws_exceptions.ConfigParserError as error:
        exception = exceptions.ConfigParserError(str(error))
    except aws_exceptions.NoCredentialsError as error:
        exception = exceptions.NoCredentialsError(str(error))
    except aws_exceptions.NoProfileError as error:
        exception = exceptions.NoProfileError(str(error))
    except aws_exceptions.AWSError as error:
        exception = exceptions.DynamoDBException(error)
    # Low-level socket/SSL failures become retryable RequestExceptions
    except (ConnectionError, ConnectionResetError, OSError,
            aws_exceptions.RequestException, ssl.SSLError,
            _select.error, ssl.socket_error, socket.gaierror) as error:
        exception = exceptions.RequestException(str(error))
    except TimeoutError:
        exception = exceptions.TimeoutException()
    except httpclient.HTTPError as error:
        # Tornado reports client-side timeouts as HTTP 599
        if error.code == 599:
            exception = exceptions.TimeoutException()
        else:
            exception = exceptions.RequestException(
                getattr(getattr(error, 'response', error),
                        'body', str(error.code)))
    except Exception as error:
        exception = error
    if exception:
        future.set_exception(exception)
    # max(now, start) clamps the recorded duration at >= 0 in case the
    # wall clock moved backwards between submission and completion.
    measurements.append(
        Measurement(now, action, table, attempt, max(now, start) - start,
                    exception.__class__.__name__
                    if exception else exception))
@staticmethod
def _process_response(response):
    """Process the raw AWS response, raising a mapped exception or
    returning the deserialized JSON payload.

    :param tornado.concurrent.Future response: The request future
    :rtype: dict or list
    :raises: sprockets_dynamodb.exceptions.DynamoDBException

    """
    failure = response.exception()
    if failure is not None:
        if isinstance(failure, aws_exceptions.AWSError):
            # AWSError carries (message, {'type': ..., 'message': ...})
            details = failure.args[1]
            if details['type'] in exceptions.MAP:
                raise exceptions.MAP[details['type']](details['message'])
        raise failure
    result = response.result()
    if not (result and result.body):
        raise exceptions.DynamoDBException('empty response')
    return json.loads(result.body.decode('utf-8'))
@staticmethod
def _sleep_duration(attempt):
"""Calculates how long to sleep between exceptions. Returns a value
in seconds.
:param int attempt: The attempt number
:rtype: float
"""
return (float(2 ** attempt) * 100) / 1000
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/client.py
|
Client.get_item
|
python
|
def get_item(self, table_name, key_dict,
             consistent_read=False,
             expression_attribute_names=None,
             projection_expression=None,
             return_consumed_capacity=None):
    """Invoke the `GetItem`_ function, fetching one item by primary key.

    :param str table_name: Table to read the item from.
    :param dict key_dict: Primary key of the item; marshalled internally,
        so a native :class:`dict` works.
    :param bool consistent_read: Use strongly consistent reads when True;
        eventually consistent otherwise.
    :param dict expression_attribute_names: Substitution tokens for
        attribute names used in expressions.
    :param str projection_expression: Comma-separated attributes to
        return; all attributes are returned when omitted.
    :param str return_consumed_capacity: ``INDEXES``, ``TOTAL`` or
        ``NONE``; validated before use.
    :rtype: tornado.concurrent.Future

    .. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_GetItem.html
    """
    payload = {'TableName': table_name,
               'Key': utils.marshall(key_dict),
               'ConsistentRead': consistent_read}
    if expression_attribute_names:
        payload.update(
            ExpressionAttributeNames=expression_attribute_names)
    if projection_expression:
        payload.update(ProjectionExpression=projection_expression)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload.update(ReturnConsumedCapacity=return_consumed_capacity)
    return self.execute('GetItem', payload)
|
Invoke the `GetItem`_ function.
:param str table_name: table to retrieve the item from
:param dict key_dict: key to use for retrieval. This will
be marshalled for you so a native :class:`dict` works.
:param bool consistent_read: Determines the read consistency model: If
set to :py:data:`True`, then the operation uses strongly consistent
reads; otherwise, the operation uses eventually consistent reads.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param str projection_expression: A string that identifies one or more
attributes to retrieve from the table. These attributes can include
scalars, sets, or elements of a JSON document. The attributes in
the expression must be separated by commas. If no attribute names
are specified, then all attributes will be returned. If any of the
requested attributes are not found, they will not appear in the
result.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response:
- INDEXES: The response includes the aggregate consumed
capacity for the operation, together with consumed capacity for
each table and secondary index that was accessed. Note that
some operations, such as *GetItem* and *BatchGetItem*, do not
access any indexes at all. In these cases, specifying INDEXES
will only return consumed capacity information for table(s).
- TOTAL: The response includes only the aggregate consumed
capacity for the operation.
- NONE: No consumed capacity details are included in the
response.
:rtype: tornado.concurrent.Future
.. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_GetItem.html
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/client.py#L278-L331
|
[
"def marshall(values):\n \"\"\"\n Marshall a `dict` into something DynamoDB likes.\n\n :param dict values: The values to marshall\n :rtype: dict\n :raises ValueError: if an unsupported type is encountered\n\n Return the values in a nested dict structure that is required for\n writing the values to DynamoDB.\n\n \"\"\"\n serialized = {}\n for key in values:\n serialized[key] = _marshall_value(values[key])\n return serialized\n",
"def _validate_return_consumed_capacity(value):\n if value not in ['INDEXES', 'TOTAL', 'NONE']:\n raise ValueError('Invalid return_consumed_capacity value')\n"
] |
class Client(object):
"""
Asynchronous DynamoDB Client
:keyword str region: AWS region to send requests to
:keyword str access_key: AWS access key. If unspecified, this
defaults to the :envvar:`AWS_ACCESS_KEY_ID` environment
variable and will fall back to using the AWS CLI credentials
file. See :class:`tornado_aws.client.AsyncAWSClient` for
more details.
:keyword str secret_key: AWS secret used to secure API calls.
If unspecified, this defaults to the :envvar:`AWS_SECRET_ACCESS_KEY`
environment variable and will fall back to using the AWS CLI
credentials as described in :class:`tornado_aws.client.AsyncAWSClient`.
:keyword str profile: optional profile to use in AWS API calls.
If unspecified, this defaults to the :envvar:`AWS_DEFAULT_PROFILE`
environment variable or ``default`` if unset.
:keyword str endpoint: DynamoDB endpoint to contact. If unspecified,
the default is determined by the region.
:keyword int max_clients: optional maximum number of HTTP requests
that may be performed in parallel.
:keyword int max_retries: Maximum number of times to retry a request when
if fails under certain conditions. Can also be set with the
:envvar:`DYNAMODB_MAX_RETRIES` environment variable.
:keyword method instrumentation_callback: A method that is invoked with a
list of measurements that were collected during the execution of an
individual action.
:keyword method on_error_callback: A method that is invoked when there is
a request exception that can not automatically be retried or the
maximum number of retries has been exceeded for a request.
Any of the methods invoked in the client can raise the following
exceptions:
- :exc:`sprockets_dynamodb.exceptions.DynamoDBException`
- :exc:`sprockets_dynamodb.exceptions.ConfigNotFound`
- :exc:`sprockets_dynamodb.exceptions.NoCredentialsError`
- :exc:`sprockets_dynamodb.exceptions.NoProfileError`
- :exc:`sprockets_dynamodb.exceptions.TimeoutException`
- :exc:`sprockets_dynamodb.exceptions.RequestException`
- :exc:`sprockets_dynamodb.exceptions.InternalFailure`
- :exc:`sprockets_dynamodb.exceptions.LimitExceeded`
- :exc:`sprockets_dynamodb.exceptions.MissingParameter`
- :exc:`sprockets_dynamodb.exceptions.OptInRequired`
- :exc:`sprockets_dynamodb.exceptions.ResourceInUse`
- :exc:`sprockets_dynamodb.exceptions.RequestExpired`
- :exc:`sprockets_dynamodb.exceptions.ServiceUnavailable`
- :exc:`sprockets_dynamodb.exceptions.ValidationException`
Create an instance of this class to interact with a DynamoDB
server. A :class:`tornado_aws.client.AsyncAWSClient` instance
implements the AWS API wrapping and this class provides the
DynamoDB specifics.
"""
DEFAULT_MAX_RETRIES = 3
def __init__(self, **kwargs):
    """Create the client, wiring up the underlying
    :class:`tornado_aws.AsyncAWSClient` and reading environment-based
    configuration overrides.

    :param kwargs: see the class docstring for the supported keywords
    """
    self.logger = LOGGER.getChild(self.__class__.__name__)
    # Allow the endpoint to be overridden from the environment (e.g. for
    # a local DynamoDB) without clobbering an explicit keyword argument
    if os.environ.get('DYNAMODB_ENDPOINT', None):
        kwargs.setdefault('endpoint', os.environ['DYNAMODB_ENDPOINT'])
    self._client = tornado_aws.AsyncAWSClient('dynamodb', **kwargs)
    self._ioloop = kwargs.get('io_loop', ioloop.IOLoop.current())
    # Cast to int: DYNAMODB_MAX_RETRIES comes from the environment as a
    # string, and execute() does ``range(1, self._max_retries + 1)``
    # with this value, which would raise TypeError on a str.
    self._max_retries = int(kwargs.get(
        'max_retries', os.environ.get(
            'DYNAMODB_MAX_RETRIES', self.DEFAULT_MAX_RETRIES)))
    self._instrumentation_callback = kwargs.get('instrumentation_callback')
    self._on_error = kwargs.get('on_error_callback')
def create_table(self, table_definition):
    """Invoke the `CreateTable`_ function.

    :param dict table_definition: description of the table to create,
        structured per `CreateTable`_
    :rtype: tornado.concurrent.Future

    .. _CreateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_CreateTable.html
    """
    payload = table_definition
    return self.execute('CreateTable', payload)
def update_table(self, table_definition):
    """Invoke the `UpdateTable`_ function (not yet implemented).

    *UpdateTable* modifies the provisioned throughput settings, global
    secondary indexes, or DynamoDB Streams settings of an existing
    table; only one such change may be made per call. The operation is
    asynchronous on the DynamoDB side: the table status changes from
    ``ACTIVE`` to ``UPDATING`` and another *UpdateTable* cannot be
    issued until the table returns to ``ACTIVE``.

    :param dict table_definition: description of the table update,
        structured per `UpdateTable`_
    :raises: NotImplementedError

    .. _UpdateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateTable.html
    """
    raise NotImplementedError
def delete_table(self, table_name):
    """Invoke the `DeleteTable`_ function.

    Deletes the table and all of its items; the table stays in the
    ``DELETING`` state until DynamoDB finishes. Deleting a table in the
    ``CREATING`` or ``UPDATING`` state raises
    :exc:`~sprockets_dynamodb.exceptions.ResourceInUse`; a missing table
    raises :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`; a
    table already in ``DELETING`` is not an error.

    :param str table_name: name of the table to delete.
    :rtype: tornado.concurrent.Future

    .. _DeleteTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteTable.html
    """
    payload = {'TableName': table_name}
    return self.execute('DeleteTable', payload)
def describe_table(self, table_name):
    """Invoke the `DescribeTable`_ function.

    :param str table_name: name of the table to describe.
    :rtype: tornado.concurrent.Future

    .. _DescribeTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DescribeTable.html
    """
    payload = {'TableName': table_name}
    return self.execute('DescribeTable', payload)
def list_tables(self, exclusive_start_table_name=None, limit=None):
    """Invoke the `ListTables`_ function.

    Returns the table names associated with the current account and
    endpoint. Output is paginated at a maximum of ``100`` names per
    page; pass the previous response's ``LastEvaluatedTableName`` as
    ``exclusive_start_table_name`` to fetch the next page.

    :param str exclusive_start_table_name: first table name to evaluate.
    :param int limit: maximum number of names to return (default 100).
    :rtype: tornado.concurrent.Future

    .. _ListTables: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_ListTables.html
    """
    # Truthiness intentionally matches the original guards: empty or
    # zero values are omitted from the request.
    payload = {key: value for key, value in
               (('ExclusiveStartTableName', exclusive_start_table_name),
                ('Limit', limit)) if value}
    return self.execute('ListTables', payload)
def put_item(self, table_name, item,
             condition_expression=None,
             expression_attribute_names=None,
             expression_attribute_values=None,
             return_consumed_capacity=None,
             return_item_collection_metrics=None,
             return_values=None):
    """Invoke the `PutItem`_ function, creating a new item or completely
    replacing an existing item that has the same primary key. A
    conditional put can be performed via ``condition_expression``.

    :param str table_name: The table to put the item to
    :param dict item: A map of attribute name/value pairs. All primary
        key attributes are required; other attributes are optional. The
        value is marshalled internally, so a native :class:`dict` works.
    :param str condition_expression: A condition that must be satisfied
        for the conditional *PutItem* to succeed.
    :param dict expression_attribute_names: Substitution tokens for
        attribute names in an expression.
    :param dict expression_attribute_values: Values substituted into an
        expression; marshalled internally.
    :param str return_consumed_capacity: Level of consumed-capacity
        detail (``INDEXES``, ``TOTAL`` or ``NONE``); validated before
        use.
    :param str return_item_collection_metrics: Whether item collection
        metrics are returned; validated before use.
    :param str return_values: Return the item attributes as they
        appeared before the put; validated before use.
    :rtype: tornado.concurrent.Future

    .. _PutItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_PutItem.html
    """
    payload = {'TableName': table_name, 'Item': utils.marshall(item)}
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        # Marshall for consistency with update_item/delete_item; the
        # DynamoDB API expects AttributeValue maps here.
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if return_consumed_capacity:
        # Validate like the sibling item methods do
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        # Pass the caller's value through (validated) rather than
        # hard-coding 'SIZE', matching update_item/delete_item.
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('PutItem', payload)
def update_item(self, table_name, key_dict,
                condition_expression=None,
                update_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=None):
    """Invoke the `UpdateItem`_ function.

    Edits an existing item's attributes, or adds a new item to the
    table if it does not already exist. Attribute values can be put,
    deleted, or added, and the update may be made conditional via
    ``condition_expression``.

    :param str table_name: Name of the table containing the item.
    :param dict key_dict: Primary key of the item to update; all key
        attributes must be present. Marshalled internally.
    :param str condition_expression: Condition that must be satisfied
        for the conditional update to succeed.
    :param str update_expression: Expression defining the attributes to
        update, the action to perform, and the new value(s).
    :param dict expression_attribute_names: Substitution tokens for
        attribute names in an expression.
    :param dict expression_attribute_values: Values substituted into an
        expression; marshalled internally.
    :param str return_consumed_capacity: Level of consumed-capacity
        detail; validated before use.
    :param str return_item_collection_metrics: Whether item collection
        metrics are returned; validated before use.
    :param str return_values: Return the item attributes as they
        appeared before or after the update; validated before use.
    :rtype: tornado.concurrent.Future

    .. _UpdateItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateItem.html
    """
    payload = {'TableName': table_name,
               'Key': utils.marshall(key_dict),
               'UpdateExpression': update_expression}
    if condition_expression:
        payload.update(ConditionExpression=condition_expression)
    if expression_attribute_names:
        payload.update(
            ExpressionAttributeNames=expression_attribute_names)
    if expression_attribute_values:
        payload.update(
            ExpressionAttributeValues=utils.marshall(
                expression_attribute_values))
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload.update(ReturnConsumedCapacity=return_consumed_capacity)
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload.update(
            ReturnItemCollectionMetrics=return_item_collection_metrics)
    if return_values:
        _validate_return_values(return_values)
        payload.update(ReturnValues=return_values)
    return self.execute('UpdateItem', payload)
def delete_item(self, table_name, key_dict,
                condition_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=False):
    """Invoke the `DeleteItem`_ function, deleting a single item by
    primary key. The delete may be made conditional via
    ``condition_expression``.

    :param str table_name: Table to delete the item from.
    :param dict key_dict: Primary key of the item to delete; all key
        attributes must be present. Marshalled internally.
    :param str condition_expression: Condition that must be satisfied
        for the conditional delete to succeed.
    :param dict expression_attribute_names: Substitution tokens for
        attribute names in an expression.
    :param dict expression_attribute_values: Values substituted into an
        expression; marshalled internally.
    :param str return_consumed_capacity: Level of consumed-capacity
        detail; validated before use.
    :param str return_item_collection_metrics: Whether item collection
        metrics are returned; validated before use.
    :param str return_values: Return the item attributes as they
        appeared before deletion; validated before use.

    .. _DeleteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteItem.html
    """
    payload = {'TableName': table_name, 'Key': utils.marshall(key_dict)}
    if condition_expression:
        payload.update(ConditionExpression=condition_expression)
    if expression_attribute_names:
        payload.update(
            ExpressionAttributeNames=expression_attribute_names)
    if expression_attribute_values:
        payload.update(
            ExpressionAttributeValues=utils.marshall(
                expression_attribute_values))
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload.update(ReturnConsumedCapacity=return_consumed_capacity)
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload.update(
            ReturnItemCollectionMetrics=return_item_collection_metrics)
    if return_values:
        _validate_return_values(return_values)
        payload.update(ReturnValues=return_values)
    return self.execute('DeleteItem', payload)
def batch_get_item(self):
    """Invoke the `BatchGetItem`_ function (not yet implemented).

    :raises: NotImplementedError

    .. _BatchGetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchGetItem.html
    """
    raise NotImplementedError
def batch_write_item(self):
    """Invoke the `BatchWriteItem`_ function (not yet implemented).

    :raises: NotImplementedError

    .. _BatchWriteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchWriteItem.html
    """
    raise NotImplementedError
def query(self, table_name,
index_name=None,
consistent_read=None,
key_condition_expression=None,
filter_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
projection_expression=None,
select=None,
exclusive_start_key=None,
limit=None,
scan_index_forward=True,
return_consumed_capacity=None):
"""A `Query`_ operation uses the primary key of a table or a secondary
index to directly access items from that table or index.
:param str table_name: The name of the table containing the requested
items.
:param bool consistent_read: Determines the read consistency model: If
set to ``True``, then the operation uses strongly consistent reads;
otherwise, the operation uses eventually consistent reads. Strongly
consistent reads are not supported on global secondary indexes. If
you query a global secondary index with ``consistent_read`` set to
``True``, you will receive a
:exc:`~sprockets_dynamodb.exceptions.ValidationException`.
:param dict exclusive_start_key: The primary key of the first
item that this operation will evaluate. Use the value that was
returned for ``LastEvaluatedKey`` in the previous operation. In a
parallel scan, a *Scan* request that includes
``exclusive_start_key`` must specify the same segment whose
previous *Scan* returned the corresponding value of
``LastEvaluatedKey``.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression.
:param str key_condition_expression: The condition that specifies the
key value(s) for items to be retrieved by the *Query* action. The
condition must perform an equality test on a single partition key
value, but can optionally perform one of several comparison tests
on a single sort key value. The partition key equality test is
required. For examples see `KeyConditionExpression
<https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/
Query.html#Query.KeyConditionExpressions>.
:param str filter_expression: A string that contains conditions that
DynamoDB applies after the *Query* operation, but before the data
is returned to you. Items that do not satisfy the criteria are not
returned. Note that a filter expression is applied after the items
have already been read; the process of filtering does not consume
any additional read capacity units. For more information, see
`Filter Expressions <http://docs.aws.amazon.com/amazondynamodb/
latest/developerguide/QueryAndScan.html#FilteringResults>`_ in the
Amazon DynamoDB Developer Guide.
:param str projection_expression:
:param str index_name: The name of a secondary index to query. This
index can be any local secondary index or global secondary index.
Note that if you use this parameter, you must also provide
``table_name``.
:param int limit: The maximum number of items to evaluate (not
necessarily the number of matching items). If DynamoDB processes
the number of items up to the limit while processing the results,
it stops the operation and returns the matching values up to that
point, and a key in ``LastEvaluatedKey`` to apply in a subsequent
operation, so that you can pick up where you left off. Also, if the
processed data set size exceeds 1 MB before DynamoDB reaches this
limit, it stops the operation and returns the matching values up to
the limit, and a key in ``LastEvaluatedKey`` to apply in a
subsequent operation to continue the operation. For more
information, see `Query and Scan <http://docs.aws.amazon.com/amazo
ndynamodb/latest/developerguide/QueryAndScan.html>`_ in the Amazon
DynamoDB Developer Guide.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response:
- ``INDEXES``: The response includes the aggregate consumed
capacity for the operation, together with consumed capacity for
each table and secondary index that was accessed. Note that
some operations, such as *GetItem* and *BatchGetItem*, do not
access any indexes at all. In these cases, specifying
``INDEXES`` will only return consumed capacity information for
table(s).
- ``TOTAL``: The response includes only the aggregate consumed
capacity for the operation.
- ``NONE``: No consumed capacity details are included in the
response.
:param bool scan_index_forward: Specifies the order for index
traversal: If ``True`` (default), the traversal is performed in
ascending order; if ``False``, the traversal is performed in
descending order. Items with the same partition key value are
stored in sorted order by sort key. If the sort key data type is
*Number*, the results are stored in numeric order. For type
*String*, the results are stored in order of ASCII character code
values. For type *Binary*, DynamoDB treats each byte of the binary
data as unsigned. If set to ``True``, DynamoDB returns the results
in the order in which they are stored (by sort key value). This is
the default behavior. If set to ``False``, DynamoDB reads the
results in reverse order by sort key value, and then returns the
results to the client.
:param str select: The attributes to be returned in the result. You can
retrieve all item attributes, specific item attributes, the count
of matching items, or in the case of an index, some or all of the
attributes projected into the index. Possible values are:
- ``ALL_ATTRIBUTES``: Returns all of the item attributes from the
specified table or index. If you query a local secondary index,
then for each matching item in the index DynamoDB will fetch
the entire item from the parent table. If the index is
configured to project all item attributes, then all of the data
can be obtained from the local secondary index, and no fetching
is required.
- ``ALL_PROJECTED_ATTRIBUTES``: Allowed only when querying an
index. Retrieves all attributes that have been projected into
the index. If the index is configured to project all
attributes, this return value is equivalent to specifying
``ALL_ATTRIBUTES``.
- ``COUNT``: Returns the number of matching items, rather than
the matching items themselves.
:rtype: dict
.. _Query: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_Query.html
"""
payload = {'TableName': table_name,
'ScanIndexForward': scan_index_forward}
if index_name:
payload['IndexName'] = index_name
if consistent_read is not None:
payload['ConsistentRead'] = consistent_read
if key_condition_expression:
payload['KeyConditionExpression'] = key_condition_expression
if filter_expression:
payload['FilterExpression'] = filter_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if projection_expression:
payload['ProjectionExpression'] = projection_expression
if select:
_validate_select(select)
payload['Select'] = select
if exclusive_start_key:
payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
if limit:
payload['Limit'] = limit
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
return self.execute('Query', payload)
def scan(self,
table_name,
index_name=None,
consistent_read=None,
projection_expression=None,
filter_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
segment=None,
total_segments=None,
select=None,
limit=None,
exclusive_start_key=None,
return_consumed_capacity=None):
"""The `Scan`_ operation returns one or more items and item attributes
by accessing every item in a table or a secondary index.
If the total number of scanned items exceeds the maximum data set size
limit of 1 MB, the scan stops and results are returned to the user as a
``LastEvaluatedKey`` value to continue the scan in a subsequent
operation. The results also include the number of items exceeding the
limit. A scan can result in no table data meeting the filter criteria.
By default, Scan operations proceed sequentially; however, for faster
performance on a large table or secondary index, applications can
request a parallel *Scan* operation by providing the ``segment`` and
``total_segments`` parameters. For more information, see
`Parallel Scan <http://docs.aws.amazon.com/amazondynamodb/latest/
developerguide/QueryAndScan.html#QueryAndScanParallelScan>`_ in the
Amazon DynamoDB Developer Guide.
By default, *Scan* uses eventually consistent reads when accessing the
data in a table; therefore, the result set might not include the
changes to data in the table immediately before the operation began. If
you need a consistent copy of the data, as of the time that the *Scan*
begins, you can set the ``consistent_read`` parameter to ``True``.
:rtype: dict
.. _Scan: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_Scan.html
"""
payload = {'TableName': table_name}
if index_name:
payload['IndexName'] = index_name
if consistent_read is not None:
payload['ConsistentRead'] = consistent_read
if filter_expression:
payload['FilterExpression'] = filter_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if projection_expression:
payload['ProjectionExpression'] = projection_expression
if segment:
payload['Segment'] = segment
if total_segments:
payload['TotalSegments'] = total_segments
if select:
_validate_select(select)
payload['Select'] = select
if exclusive_start_key:
payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
if limit:
payload['Limit'] = limit
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
return self.execute('Scan', payload)
    @gen.coroutine
    def execute(self, action, parameters):
        """
        Execute a DynamoDB action with the given parameters. The method will
        retry requests that failed due to OS level errors or when being
        throttled by DynamoDB.

        :param str action: DynamoDB action to invoke
        :param dict parameters: parameters to send into the action
        :rtype: tornado.concurrent.Future

        This method creates a future that will resolve to the result
        of calling the specified DynamoDB function. It does its best
        to unwrap the response from the function to make life a little
        easier for you. It does this for the ``GetItem`` and ``Query``
        functions currently.

        :raises:
            :exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
            :exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
            :exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
            :exc:`~sprockets_dynamodb.exceptions.NoProfileError`
            :exc:`~sprockets_dynamodb.exceptions.TimeoutException`
            :exc:`~sprockets_dynamodb.exceptions.RequestException`
            :exc:`~sprockets_dynamodb.exceptions.InternalFailure`
            :exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
            :exc:`~sprockets_dynamodb.exceptions.MissingParameter`
            :exc:`~sprockets_dynamodb.exceptions.OptInRequired`
            :exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
            :exc:`~sprockets_dynamodb.exceptions.RequestExpired`
            :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
            :exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
            :exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
            :exc:`~sprockets_dynamodb.exceptions.ValidationException`

        """
        # Bounded per-attempt measurement history: at most one entry per
        # retry attempt is ever recorded, so cap the deque accordingly.
        measurements = collections.deque([], self._max_retries)
        for attempt in range(1, self._max_retries + 1):
            try:
                result = yield self._execute(
                    action, parameters, attempt, measurements)
            except (exceptions.InternalServerError,
                    exceptions.RequestException,
                    exceptions.ThrottlingException,
                    exceptions.ThroughputExceeded,
                    exceptions.ServiceUnavailable) as error:
                # Transient failure: exhaust the retry budget with an
                # exponential backoff between attempts.
                if attempt == self._max_retries:
                    if self._instrumentation_callback:
                        self._instrumentation_callback(measurements)
                    # NOTE(review): when an error callback is registered,
                    # _on_exception returns instead of raising, so the final
                    # attempt still sleeps below and the coroutine resolves
                    # to None — confirm that fall-through is intended.
                    self._on_exception(error)
                duration = self._sleep_duration(attempt)
                self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
                                    error, attempt, duration)
                yield gen.sleep(duration)
            except exceptions.DynamoDBException as error:
                # Non-retryable DynamoDB error: report measurements, then
                # raise (or delegate to the registered error callback).
                if self._instrumentation_callback:
                    self._instrumentation_callback(measurements)
                self._on_exception(error)
            else:
                # Success path: report measurements and hand back the
                # unwrapped result (gen.Return is Tornado's pre-Python-3.3
                # mechanism for returning a value from a coroutine).
                if self._instrumentation_callback:
                    self._instrumentation_callback(measurements)
                self.logger.debug('%s result: %r', action, result)
                raise gen.Return(_unwrap_result(action, result))
def set_error_callback(self, callback):
"""Assign a method to invoke when a request has encountered an
unrecoverable error in an action execution.
:param method callback: The method to invoke
"""
self.logger.debug('Setting error callback: %r', callback)
self._on_error = callback
def set_instrumentation_callback(self, callback):
"""Assign a method to invoke when a request has completed gathering
measurements.
:param method callback: The method to invoke
"""
self.logger.debug('Setting instrumentation callback: %r', callback)
self._instrumentation_callback = callback
def _execute(self, action, parameters, attempt, measurements):
"""Invoke a DynamoDB action
:param str action: DynamoDB action to invoke
:param dict parameters: parameters to send into the action
:param int attempt: Which attempt number this is
:param list measurements: A list for accumulating request measurements
:rtype: tornado.concurrent.Future
"""
future = concurrent.Future()
start = time.time()
def handle_response(request):
"""Invoked by the IOLoop when fetch has a response to process.
:param tornado.concurrent.Future request: The request future
"""
self._on_response(
action, parameters.get('TableName', 'Unknown'), attempt,
start, request, future, measurements)
ioloop.IOLoop.current().add_future(self._client.fetch(
'POST', '/',
body=json.dumps(parameters).encode('utf-8'),
headers={
'x-amz-target': 'DynamoDB_20120810.{}'.format(action),
'Content-Type': 'application/x-amz-json-1.0',
}), handle_response)
return future
def _on_exception(self, error):
"""Handle exceptions that can not be retried.
:param error: The exception that was raised
:type error: sprockets_dynamodb.exceptions.DynamoDBException
"""
if not self._on_error:
raise error
self._on_error(error)
    def _on_response(self, action, table, attempt, start, response, future,
                     measurements):
        """Invoked when the HTTP request to the DynamoDB has returned and
        is responsible for setting the future result or exception based upon
        the HTTP response provided.

        :param str action: The action that was taken
        :param str table: The table name the action was made against
        :param int attempt: The attempt number for the action
        :param float start: When the request was submitted
        :param tornado.concurrent.Future response: The HTTP request future
        :param tornado.concurrent.Future future: The action execution future
        :param list measurements: The measurement accumulator

        """
        self.logger.debug('%s on %s request #%i = %r',
                          action, table, attempt, response)
        now, exception = time.time(), None
        # Map low-level AWS/transport errors onto this package's exception
        # hierarchy. The except-clause order matters: the most specific
        # classes must come before their bases and the catch-all Exception.
        try:
            future.set_result(self._process_response(response))
        except aws_exceptions.ConfigNotFound as error:
            exception = exceptions.ConfigNotFound(str(error))
        except aws_exceptions.ConfigParserError as error:
            exception = exceptions.ConfigParserError(str(error))
        except aws_exceptions.NoCredentialsError as error:
            exception = exceptions.NoCredentialsError(str(error))
        except aws_exceptions.NoProfileError as error:
            exception = exceptions.NoProfileError(str(error))
        except aws_exceptions.AWSError as error:
            exception = exceptions.DynamoDBException(error)
        except (ConnectionError, ConnectionResetError, OSError,
                aws_exceptions.RequestException, ssl.SSLError,
                _select.error, ssl.socket_error, socket.gaierror) as error:
            # Socket/transport-level failures are retryable RequestExceptions.
            exception = exceptions.RequestException(str(error))
        except TimeoutError:
            exception = exceptions.TimeoutException()
        except httpclient.HTTPError as error:
            # Tornado reports client-side timeouts as HTTP code 599.
            if error.code == 599:
                exception = exceptions.TimeoutException()
            else:
                exception = exceptions.RequestException(
                    getattr(getattr(error, 'response', error),
                            'body', str(error.code)))
        except Exception as error:
            exception = error
        if exception:
            future.set_exception(exception)
        # A measurement is always recorded, on success and failure alike;
        # the final field is the exception class name or None.
        measurements.append(
            Measurement(now, action, table, attempt, max(now, start) - start,
                        exception.__class__.__name__
                        if exception else exception))
@staticmethod
def _process_response(response):
"""Process the raw AWS response, returning either the mapped exception
or deserialized response.
:param tornado.concurrent.Future response: The request future
:rtype: dict or list
:raises: sprockets_dynamodb.exceptions.DynamoDBException
"""
error = response.exception()
if error:
if isinstance(error, aws_exceptions.AWSError):
if error.args[1]['type'] in exceptions.MAP:
raise exceptions.MAP[error.args[1]['type']](
error.args[1]['message'])
raise error
http_response = response.result()
if not http_response or not http_response.body:
raise exceptions.DynamoDBException('empty response')
return json.loads(http_response.body.decode('utf-8'))
@staticmethod
def _sleep_duration(attempt):
"""Calculates how long to sleep between exceptions. Returns a value
in seconds.
:param int attempt: The attempt number
:rtype: float
"""
return (float(2 ** attempt) * 100) / 1000
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/client.py
|
Client.update_item
|
python
|
def update_item(self, table_name, key_dict,
condition_expression=None,
update_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
return_values=None):
payload = {'TableName': table_name,
'Key': utils.marshall(key_dict),
'UpdateExpression': update_expression}
if condition_expression:
payload['ConditionExpression'] = condition_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics:
_validate_return_item_collection_metrics(
return_item_collection_metrics)
payload['ReturnItemCollectionMetrics'] = \
return_item_collection_metrics
if return_values:
_validate_return_values(return_values)
payload['ReturnValues'] = return_values
return self.execute('UpdateItem', payload)
|
Invoke the `UpdateItem`_ function.
Edits an existing item's attributes, or adds a new item to the table
if it does not already exist. You can put, delete, or add attribute
values. You can also perform a conditional update on an existing item
(insert a new attribute name-value pair if it doesn't exist, or replace
an existing name-value pair if it has certain expected attribute
values).
:param str table_name: The name of the table that contains the item to
update
:param dict key_dict: A dictionary of key/value pairs that are used to
define the primary key values for the item. For the primary key,
you must provide all of the attributes. For example, with a simple
primary key, you only need to provide a value for the partition
key. For a composite primary key, you must provide values for both
the partition key and the sort key.
:param str condition_expression: A condition that must be satisfied in
order for a conditional *UpdateItem* operation to succeed. One of:
``attribute_exists``, ``attribute_not_exists``, ``attribute_type``,
``contains``, ``begins_with``, ``size``, ``=``, ``<>``, ``<``,
``>``, ``<=``, ``>=``, ``BETWEEN``, ``IN``, ``AND``, ``OR``, or
``NOT``.
:param str update_expression: An expression that defines one or more
attributes to be updated, the action to be performed on them, and
new value(s) for them.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response. See the `AWS documentation
for ReturnConsumedCapacity <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_UpdateItem.html#DDB-Update
Item-request-ReturnConsumedCapacity>`_ for more information.
:param str return_item_collection_metrics: Determines whether item
collection metrics are returned.
:param str return_values: Use ReturnValues if you want to get the item
attributes as they appeared either before or after they were
updated. See the `AWS documentation for ReturnValues <http://docs.
aws.amazon.com/amazondynamodb/latest/APIReference/
API_UpdateItem.html#DDB-UpdateItem-request-ReturnValues>`_
:rtype: tornado.concurrent.Future
.. _UpdateItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_UpdateItem.html
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/client.py#L333-L411
|
[
"def marshall(values):\n \"\"\"\n Marshall a `dict` into something DynamoDB likes.\n\n :param dict values: The values to marshall\n :rtype: dict\n :raises ValueError: if an unsupported type is encountered\n\n Return the values in a nested dict structure that is required for\n writing the values to DynamoDB.\n\n \"\"\"\n serialized = {}\n for key in values:\n serialized[key] = _marshall_value(values[key])\n return serialized\n",
"def _validate_return_values(value):\n if value not in ['NONE', 'ALL_NEW', 'ALL_OLD',\n 'UPDATED_NEW', 'UPDATED_OLD']:\n raise ValueError('Invalid return_values value')\n",
"def _validate_return_consumed_capacity(value):\n if value not in ['INDEXES', 'TOTAL', 'NONE']:\n raise ValueError('Invalid return_consumed_capacity value')\n",
"def _validate_return_item_collection_metrics(value):\n if value not in ['NONE', 'SIZE']:\n raise ValueError('Invalid return_item_collection_metrics value')\n"
] |
class Client(object):
"""
Asynchronous DynamoDB Client
:keyword str region: AWS region to send requests to
:keyword str access_key: AWS access key. If unspecified, this
defaults to the :envvar:`AWS_ACCESS_KEY_ID` environment
variable and will fall back to using the AWS CLI credentials
file. See :class:`tornado_aws.client.AsyncAWSClient` for
more details.
:keyword str secret_key: AWS secret used to secure API calls.
If unspecified, this defaults to the :envvar:`AWS_SECRET_ACCESS_KEY`
environment variable and will fall back to using the AWS CLI
credentials as described in :class:`tornado_aws.client.AsyncAWSClient`.
:keyword str profile: optional profile to use in AWS API calls.
If unspecified, this defaults to the :envvar:`AWS_DEFAULT_PROFILE`
environment variable or ``default`` if unset.
:keyword str endpoint: DynamoDB endpoint to contact. If unspecified,
the default is determined by the region.
:keyword int max_clients: optional maximum number of HTTP requests
that may be performed in parallel.
    :keyword int max_retries: Maximum number of times to retry a request
        if it fails under certain conditions. Can also be set with the
        :envvar:`DYNAMODB_MAX_RETRIES` environment variable.
:keyword method instrumentation_callback: A method that is invoked with a
list of measurements that were collected during the execution of an
individual action.
:keyword method on_error_callback: A method that is invoked when there is
a request exception that can not automatically be retried or the
maximum number of retries has been exceeded for a request.
Any of the methods invoked in the client can raise the following
exceptions:
- :exc:`sprockets_dynamodb.exceptions.DynamoDBException`
- :exc:`sprockets_dynamodb.exceptions.ConfigNotFound`
- :exc:`sprockets_dynamodb.exceptions.NoCredentialsError`
- :exc:`sprockets_dynamodb.exceptions.NoProfileError`
- :exc:`sprockets_dynamodb.exceptions.TimeoutException`
- :exc:`sprockets_dynamodb.exceptions.RequestException`
- :exc:`sprockets_dynamodb.exceptions.InternalFailure`
- :exc:`sprockets_dynamodb.exceptions.LimitExceeded`
- :exc:`sprockets_dynamodb.exceptions.MissingParameter`
- :exc:`sprockets_dynamodb.exceptions.OptInRequired`
- :exc:`sprockets_dynamodb.exceptions.ResourceInUse`
- :exc:`sprockets_dynamodb.exceptions.RequestExpired`
- :exc:`sprockets_dynamodb.exceptions.ServiceUnavailable`
- :exc:`sprockets_dynamodb.exceptions.ValidationException`
Create an instance of this class to interact with a DynamoDB
server. A :class:`tornado_aws.client.AsyncAWSClient` instance
implements the AWS API wrapping and this class provides the
DynamoDB specifics.
"""
DEFAULT_MAX_RETRIES = 3
def __init__(self, **kwargs):
self.logger = LOGGER.getChild(self.__class__.__name__)
if os.environ.get('DYNAMODB_ENDPOINT', None):
kwargs.setdefault('endpoint', os.environ['DYNAMODB_ENDPOINT'])
self._client = tornado_aws.AsyncAWSClient('dynamodb', **kwargs)
self._ioloop = kwargs.get('io_loop', ioloop.IOLoop.current())
self._max_retries = kwargs.get(
'max_retries', os.environ.get(
'DYNAMODB_MAX_RETRIES', self.DEFAULT_MAX_RETRIES))
self._instrumentation_callback = kwargs.get('instrumentation_callback')
self._on_error = kwargs.get('on_error_callback')
def create_table(self, table_definition):
"""
Invoke the ``CreateTable`` function.
:param dict table_definition: description of the table to
create according to `CreateTable`_
:rtype: tornado.concurrent.Future
.. _CreateTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_CreateTable.html
"""
return self.execute('CreateTable', table_definition)
def update_table(self, table_definition):
"""
Modifies the provisioned throughput settings, global secondary
indexes, or DynamoDB Streams settings for a given table.
You can only perform one of the following operations at once:
- Modify the provisioned throughput settings of the table.
- Enable or disable Streams on the table.
- Remove a global secondary index from the table.
- Create a new global secondary index on the table. Once the index
begins back-filling, you can use *UpdateTable* to perform other
operations.
*UpdateTable* is an asynchronous operation; while it is executing, the
table status changes from ``ACTIVE`` to ``UPDATING``. While it is
``UPDATING``, you cannot issue another *UpdateTable* request. When the
table returns to the ``ACTIVE`` state, the *UpdateTable* operation is
complete.
:param dict table_definition: description of the table to
update according to `UpdateTable`_
:rtype: tornado.concurrent.Future
.. _UpdateTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_UpdateTable.html
"""
raise NotImplementedError
def delete_table(self, table_name):
"""
Invoke the `DeleteTable`_ function. The DeleteTable operation deletes a
table and all of its items. After a DeleteTable request, the specified
table is in the DELETING state until DynamoDB completes the deletion.
If the table is in the ACTIVE state, you can delete it. If a table is
in CREATING or UPDATING states, then a
:py:exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
exception is raised. If the specified table does not exist, a
:exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
exception is raised. If table is already in the DELETING state, no
error is returned.
:param str table_name: name of the table to describe.
:rtype: tornado.concurrent.Future
.. _DeleteTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_DeleteTable.html
"""
return self.execute('DeleteTable', {'TableName': table_name})
def describe_table(self, table_name):
"""
Invoke the `DescribeTable`_ function.
:param str table_name: name of the table to describe.
:rtype: tornado.concurrent.Future
.. _DescribeTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_DescribeTable.html
"""
return self.execute('DescribeTable', {'TableName': table_name})
def list_tables(self, exclusive_start_table_name=None, limit=None):
"""
Invoke the `ListTables`_ function.
Returns an array of table names associated with the current account
and endpoint. The output from *ListTables* is paginated, with each page
returning a maximum of ``100`` table names.
:param str exclusive_start_table_name: The first table name that this
operation will evaluate. Use the value that was returned for
``LastEvaluatedTableName`` in a previous operation, so that you can
obtain the next page of results.
:param int limit: A maximum number of table names to return. If this
parameter is not specified, the limit is ``100``.
.. _ListTables: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_ListTables.html
"""
payload = {}
if exclusive_start_table_name:
payload['ExclusiveStartTableName'] = exclusive_start_table_name
if limit:
payload['Limit'] = limit
return self.execute('ListTables', payload)
def put_item(self, table_name, item,
condition_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
return_values=None):
"""Invoke the `PutItem`_ function, creating a new item, or replaces an
old item with a new item. If an item that has the same primary key as
the new item already exists in the specified table, the new item
completely replaces the existing item. You can perform a conditional
put operation (add a new item if one with the specified primary key
doesn't exist), or replace an existing item if it has certain attribute
values.
For more information about using this API, see Working with Items in
the Amazon DynamoDB Developer Guide.
:param str table_name: The table to put the item to
:param dict item: A map of attribute name/value pairs, one for each
attribute. Only the primary key attributes are required; you can
optionally provide other attribute name-value pairs for the item.
You must provide all of the attributes for the primary key. For
example, with a simple primary key, you only need to provide a
value for the partition key. For a composite primary key, you must
provide both values for both the partition key and the sort key.
If you specify any attributes that are part of an index key, then
the data types for those attributes must match those of the schema
in the table's attribute definition.
:param str condition_expression: A condition that must be satisfied in
order for a conditional *PutItem* operation to succeed. See the
`AWS documentation for ConditionExpression <http://docs.aws.amazon.
com/amazondynamodb/latest/APIReference/API_PutItem.html#DDB-Put
Item-request-ConditionExpression>`_ for more information.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression. See the `AWS documentation
for ExpressionAttributeNames <http://docs.aws.amazon.com/amazon
dynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-
ExpressionAttributeNames>`_ for more information.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression. See the `AWS documentation
for ExpressionAttributeValues <http://docs.aws.amazon.com/amazon
dynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-
ExpressionAttributeValues>`_ for more information.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response. Should be ``None`` or one of ``INDEXES`` or ``TOTAL``
:param str return_item_collection_metrics: Determines whether item
collection metrics are returned.
:param str return_values: Use ``ReturnValues`` if you want to get the
item attributes as they appeared before they were updated with the
``PutItem`` request.
:rtype: tornado.concurrent.Future
.. _PutItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_PutItem.html
"""
payload = {'TableName': table_name, 'Item': utils.marshall(item)}
if condition_expression:
payload['ConditionExpression'] = condition_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = expression_attribute_values
if return_consumed_capacity:
payload['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics:
payload['ReturnItemCollectionMetrics'] = 'SIZE'
if return_values:
_validate_return_values(return_values)
payload['ReturnValues'] = return_values
return self.execute('PutItem', payload)
def get_item(self, table_name, key_dict,
consistent_read=False,
expression_attribute_names=None,
projection_expression=None,
return_consumed_capacity=None):
"""
Invoke the `GetItem`_ function.
:param str table_name: table to retrieve the item from
:param dict key_dict: key to use for retrieval. This will
be marshalled for you so a native :class:`dict` works.
:param bool consistent_read: Determines the read consistency model: If
set to :py:data`True`, then the operation uses strongly consistent
reads; otherwise, the operation uses eventually consistent reads.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param str projection_expression: A string that identifies one or more
attributes to retrieve from the table. These attributes can include
scalars, sets, or elements of a JSON document. The attributes in
the expression must be separated by commas. If no attribute names
are specified, then all attributes will be returned. If any of the
requested attributes are not found, they will not appear in the
result.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response:
- INDEXES: The response includes the aggregate consumed
capacity for the operation, together with consumed capacity for
each table and secondary index that was accessed. Note that
some operations, such as *GetItem* and *BatchGetItem*, do not
access any indexes at all. In these cases, specifying INDEXES
will only return consumed capacity information for table(s).
- TOTAL: The response includes only the aggregate consumed
capacity for the operation.
- NONE: No consumed capacity details are included in the
response.
:rtype: tornado.concurrent.Future
.. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_GetItem.html
"""
payload = {'TableName': table_name,
'Key': utils.marshall(key_dict),
'ConsistentRead': consistent_read}
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if projection_expression:
payload['ProjectionExpression'] = projection_expression
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
return self.execute('GetItem', payload)
def delete_item(self, table_name, key_dict,
condition_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
return_values=False):
"""Invoke the `DeleteItem`_ function that deletes a single item in a
table by primary key. You can perform a conditional delete operation
that deletes the item if it exists, or if it has an expected attribute
value.
:param str table_name: The name of the table from which to delete the
item.
:param dict key_dict: A map of attribute names to ``AttributeValue``
objects, representing the primary key of the item to delete. For
the primary key, you must provide all of the attributes. For
example, with a simple primary key, you only need to provide a
value for the partition key. For a composite primary key, you must
provide values for both the partition key and the sort key.
:param str condition_expression: A condition that must be satisfied in
order for a conditional *DeleteItem* to succeed. See the `AWS
documentation for ConditionExpression <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_DeleteItem.html#DDB-Delete
Item-request-ConditionExpression>`_ for more information.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression. See the `AWS documentation
for ExpressionAttributeNames <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_DeleteItem.html#DDB-Delete
Item-request-ExpressionAttributeNames>`_ for more information.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression. See the `AWS documentation
for ExpressionAttributeValues <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_DeleteItem.html#DDB-Delete
Item-request-ExpressionAttributeValues>`_ for more information.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response. See the `AWS documentation
for ReturnConsumedCapacity <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_DeleteItem.html#DDB-Delete
Item-request-ReturnConsumedCapacity>`_ for more information.
:param str return_item_collection_metrics: Determines whether item
collection metrics are returned.
:param str return_values: Return the item attributes as they appeared
before they were deleted.
.. _DeleteItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_DeleteItem.html
"""
payload = {'TableName': table_name, 'Key': utils.marshall(key_dict)}
if condition_expression:
payload['ConditionExpression'] = condition_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics:
_validate_return_item_collection_metrics(
return_item_collection_metrics)
payload['ReturnItemCollectionMetrics'] = \
return_item_collection_metrics
if return_values:
_validate_return_values(return_values)
payload['ReturnValues'] = return_values
return self.execute('DeleteItem', payload)
def batch_get_item(self):
"""Invoke the `BatchGetItem`_ function.
.. _BatchGetItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_BatchGetItem.html
"""
raise NotImplementedError
def batch_write_item(self):
"""Invoke the `BatchWriteItem`_ function.
.. _BatchWriteItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_BatchWriteItem.html
"""
raise NotImplementedError
def query(self, table_name,
index_name=None,
consistent_read=None,
key_condition_expression=None,
filter_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
projection_expression=None,
select=None,
exclusive_start_key=None,
limit=None,
scan_index_forward=True,
return_consumed_capacity=None):
"""A `Query`_ operation uses the primary key of a table or a secondary
index to directly access items from that table or index.
:param str table_name: The name of the table containing the requested
items.
:param bool consistent_read: Determines the read consistency model: If
set to ``True``, then the operation uses strongly consistent reads;
otherwise, the operation uses eventually consistent reads. Strongly
consistent reads are not supported on global secondary indexes. If
you query a global secondary index with ``consistent_read`` set to
``True``, you will receive a
:exc:`~sprockets_dynamodb.exceptions.ValidationException`.
:param dict exclusive_start_key: The primary key of the first
item that this operation will evaluate. Use the value that was
returned for ``LastEvaluatedKey`` in the previous operation. In a
parallel scan, a *Scan* request that includes
``exclusive_start_key`` must specify the same segment whose
previous *Scan* returned the corresponding value of
``LastEvaluatedKey``.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression.
:param str key_condition_expression: The condition that specifies the
key value(s) for items to be retrieved by the *Query* action. The
condition must perform an equality test on a single partition key
value, but can optionally perform one of several comparison tests
on a single sort key value. The partition key equality test is
required. For examples see `KeyConditionExpression
<https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/
Query.html#Query.KeyConditionExpressions>.
:param str filter_expression: A string that contains conditions that
DynamoDB applies after the *Query* operation, but before the data
is returned to you. Items that do not satisfy the criteria are not
returned. Note that a filter expression is applied after the items
have already been read; the process of filtering does not consume
any additional read capacity units. For more information, see
`Filter Expressions <http://docs.aws.amazon.com/amazondynamodb/
latest/developerguide/QueryAndScan.html#FilteringResults>`_ in the
Amazon DynamoDB Developer Guide.
:param str projection_expression:
:param str index_name: The name of a secondary index to query. This
index can be any local secondary index or global secondary index.
Note that if you use this parameter, you must also provide
``table_name``.
:param int limit: The maximum number of items to evaluate (not
necessarily the number of matching items). If DynamoDB processes
the number of items up to the limit while processing the results,
it stops the operation and returns the matching values up to that
point, and a key in ``LastEvaluatedKey`` to apply in a subsequent
operation, so that you can pick up where you left off. Also, if the
processed data set size exceeds 1 MB before DynamoDB reaches this
limit, it stops the operation and returns the matching values up to
the limit, and a key in ``LastEvaluatedKey`` to apply in a
subsequent operation to continue the operation. For more
information, see `Query and Scan <http://docs.aws.amazon.com/amazo
ndynamodb/latest/developerguide/QueryAndScan.html>`_ in the Amazon
DynamoDB Developer Guide.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response:
- ``INDEXES``: The response includes the aggregate consumed
capacity for the operation, together with consumed capacity for
each table and secondary index that was accessed. Note that
some operations, such as *GetItem* and *BatchGetItem*, do not
access any indexes at all. In these cases, specifying
``INDEXES`` will only return consumed capacity information for
table(s).
- ``TOTAL``: The response includes only the aggregate consumed
capacity for the operation.
- ``NONE``: No consumed capacity details are included in the
response.
:param bool scan_index_forward: Specifies the order for index
traversal: If ``True`` (default), the traversal is performed in
ascending order; if ``False``, the traversal is performed in
descending order. Items with the same partition key value are
stored in sorted order by sort key. If the sort key data type is
*Number*, the results are stored in numeric order. For type
*String*, the results are stored in order of ASCII character code
values. For type *Binary*, DynamoDB treats each byte of the binary
data as unsigned. If set to ``True``, DynamoDB returns the results
in the order in which they are stored (by sort key value). This is
the default behavior. If set to ``False``, DynamoDB reads the
results in reverse order by sort key value, and then returns the
results to the client.
:param str select: The attributes to be returned in the result. You can
retrieve all item attributes, specific item attributes, the count
of matching items, or in the case of an index, some or all of the
attributes projected into the index. Possible values are:
- ``ALL_ATTRIBUTES``: Returns all of the item attributes from the
specified table or index. If you query a local secondary index,
then for each matching item in the index DynamoDB will fetch
the entire item from the parent table. If the index is
configured to project all item attributes, then all of the data
can be obtained from the local secondary index, and no fetching
is required.
- ``ALL_PROJECTED_ATTRIBUTES``: Allowed only when querying an
index. Retrieves all attributes that have been projected into
the index. If the index is configured to project all
attributes, this return value is equivalent to specifying
``ALL_ATTRIBUTES``.
- ``COUNT``: Returns the number of matching items, rather than
the matching items themselves.
:rtype: dict
.. _Query: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_Query.html
"""
payload = {'TableName': table_name,
'ScanIndexForward': scan_index_forward}
if index_name:
payload['IndexName'] = index_name
if consistent_read is not None:
payload['ConsistentRead'] = consistent_read
if key_condition_expression:
payload['KeyConditionExpression'] = key_condition_expression
if filter_expression:
payload['FilterExpression'] = filter_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if projection_expression:
payload['ProjectionExpression'] = projection_expression
if select:
_validate_select(select)
payload['Select'] = select
if exclusive_start_key:
payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
if limit:
payload['Limit'] = limit
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
return self.execute('Query', payload)
def scan(self,
table_name,
index_name=None,
consistent_read=None,
projection_expression=None,
filter_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
segment=None,
total_segments=None,
select=None,
limit=None,
exclusive_start_key=None,
return_consumed_capacity=None):
"""The `Scan`_ operation returns one or more items and item attributes
by accessing every item in a table or a secondary index.
If the total number of scanned items exceeds the maximum data set size
limit of 1 MB, the scan stops and results are returned to the user as a
``LastEvaluatedKey`` value to continue the scan in a subsequent
operation. The results also include the number of items exceeding the
limit. A scan can result in no table data meeting the filter criteria.
By default, Scan operations proceed sequentially; however, for faster
performance on a large table or secondary index, applications can
request a parallel *Scan* operation by providing the ``segment`` and
``total_segments`` parameters. For more information, see
`Parallel Scan <http://docs.aws.amazon.com/amazondynamodb/latest/
developerguide/QueryAndScan.html#QueryAndScanParallelScan>`_ in the
Amazon DynamoDB Developer Guide.
By default, *Scan* uses eventually consistent reads when accessing the
data in a table; therefore, the result set might not include the
changes to data in the table immediately before the operation began. If
you need a consistent copy of the data, as of the time that the *Scan*
begins, you can set the ``consistent_read`` parameter to ``True``.
:rtype: dict
.. _Scan: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_Scan.html
"""
payload = {'TableName': table_name}
if index_name:
payload['IndexName'] = index_name
if consistent_read is not None:
payload['ConsistentRead'] = consistent_read
if filter_expression:
payload['FilterExpression'] = filter_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if projection_expression:
payload['ProjectionExpression'] = projection_expression
if segment:
payload['Segment'] = segment
if total_segments:
payload['TotalSegments'] = total_segments
if select:
_validate_select(select)
payload['Select'] = select
if exclusive_start_key:
payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
if limit:
payload['Limit'] = limit
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
return self.execute('Scan', payload)
@gen.coroutine
def execute(self, action, parameters):
"""
Execute a DynamoDB action with the given parameters. The method will
retry requests that failed due to OS level errors or when being
throttled by DynamoDB.
:param str action: DynamoDB action to invoke
:param dict parameters: parameters to send into the action
:rtype: tornado.concurrent.Future
This method creates a future that will resolve to the result
of calling the specified DynamoDB function. It does it's best
to unwrap the response from the function to make life a little
easier for you. It does this for the ``GetItem`` and ``Query``
functions currently.
:raises:
:exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
:exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
:exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
:exc:`~sprockets_dynamodb.exceptions.NoProfileError`
:exc:`~sprockets_dynamodb.exceptions.TimeoutException`
:exc:`~sprockets_dynamodb.exceptions.RequestException`
:exc:`~sprockets_dynamodb.exceptions.InternalFailure`
:exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
:exc:`~sprockets_dynamodb.exceptions.MissingParameter`
:exc:`~sprockets_dynamodb.exceptions.OptInRequired`
:exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
:exc:`~sprockets_dynamodb.exceptions.RequestExpired`
:exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
:exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
:exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
:exc:`~sprockets_dynamodb.exceptions.ValidationException`
"""
measurements = collections.deque([], self._max_retries)
for attempt in range(1, self._max_retries + 1):
try:
result = yield self._execute(
action, parameters, attempt, measurements)
except (exceptions.InternalServerError,
exceptions.RequestException,
exceptions.ThrottlingException,
exceptions.ThroughputExceeded,
exceptions.ServiceUnavailable) as error:
if attempt == self._max_retries:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self._on_exception(error)
duration = self._sleep_duration(attempt)
self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
error, attempt, duration)
yield gen.sleep(duration)
except exceptions.DynamoDBException as error:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self._on_exception(error)
else:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self.logger.debug('%s result: %r', action, result)
raise gen.Return(_unwrap_result(action, result))
def set_error_callback(self, callback):
"""Assign a method to invoke when a request has encountered an
unrecoverable error in an action execution.
:param method callback: The method to invoke
"""
self.logger.debug('Setting error callback: %r', callback)
self._on_error = callback
def set_instrumentation_callback(self, callback):
"""Assign a method to invoke when a request has completed gathering
measurements.
:param method callback: The method to invoke
"""
self.logger.debug('Setting instrumentation callback: %r', callback)
self._instrumentation_callback = callback
def _execute(self, action, parameters, attempt, measurements):
"""Invoke a DynamoDB action
:param str action: DynamoDB action to invoke
:param dict parameters: parameters to send into the action
:param int attempt: Which attempt number this is
:param list measurements: A list for accumulating request measurements
:rtype: tornado.concurrent.Future
"""
future = concurrent.Future()
start = time.time()
def handle_response(request):
"""Invoked by the IOLoop when fetch has a response to process.
:param tornado.concurrent.Future request: The request future
"""
self._on_response(
action, parameters.get('TableName', 'Unknown'), attempt,
start, request, future, measurements)
ioloop.IOLoop.current().add_future(self._client.fetch(
'POST', '/',
body=json.dumps(parameters).encode('utf-8'),
headers={
'x-amz-target': 'DynamoDB_20120810.{}'.format(action),
'Content-Type': 'application/x-amz-json-1.0',
}), handle_response)
return future
def _on_exception(self, error):
"""Handle exceptions that can not be retried.
:param error: The exception that was raised
:type error: sprockets_dynamodb.exceptions.DynamoDBException
"""
if not self._on_error:
raise error
self._on_error(error)
def _on_response(self, action, table, attempt, start, response, future,
measurements):
"""Invoked when the HTTP request to the DynamoDB has returned and
is responsible for setting the future result or exception based upon
the HTTP response provided.
:param str action: The action that was taken
:param str table: The table name the action was made against
:param int attempt: The attempt number for the action
:param float start: When the request was submitted
:param tornado.concurrent.Future response: The HTTP request future
:param tornado.concurrent.Future future: The action execution future
:param list measurements: The measurement accumulator
"""
self.logger.debug('%s on %s request #%i = %r',
action, table, attempt, response)
now, exception = time.time(), None
try:
future.set_result(self._process_response(response))
except aws_exceptions.ConfigNotFound as error:
exception = exceptions.ConfigNotFound(str(error))
except aws_exceptions.ConfigParserError as error:
exception = exceptions.ConfigParserError(str(error))
except aws_exceptions.NoCredentialsError as error:
exception = exceptions.NoCredentialsError(str(error))
except aws_exceptions.NoProfileError as error:
exception = exceptions.NoProfileError(str(error))
except aws_exceptions.AWSError as error:
exception = exceptions.DynamoDBException(error)
except (ConnectionError, ConnectionResetError, OSError,
aws_exceptions.RequestException, ssl.SSLError,
_select.error, ssl.socket_error, socket.gaierror) as error:
exception = exceptions.RequestException(str(error))
except TimeoutError:
exception = exceptions.TimeoutException()
except httpclient.HTTPError as error:
if error.code == 599:
exception = exceptions.TimeoutException()
else:
exception = exceptions.RequestException(
getattr(getattr(error, 'response', error),
'body', str(error.code)))
except Exception as error:
exception = error
if exception:
future.set_exception(exception)
measurements.append(
Measurement(now, action, table, attempt, max(now, start) - start,
exception.__class__.__name__
if exception else exception))
@staticmethod
def _process_response(response):
"""Process the raw AWS response, returning either the mapped exception
or deserialized response.
:param tornado.concurrent.Future response: The request future
:rtype: dict or list
:raises: sprockets_dynamodb.exceptions.DynamoDBException
"""
error = response.exception()
if error:
if isinstance(error, aws_exceptions.AWSError):
if error.args[1]['type'] in exceptions.MAP:
raise exceptions.MAP[error.args[1]['type']](
error.args[1]['message'])
raise error
http_response = response.result()
if not http_response or not http_response.body:
raise exceptions.DynamoDBException('empty response')
return json.loads(http_response.body.decode('utf-8'))
@staticmethod
def _sleep_duration(attempt):
"""Calculates how long to sleep between exceptions. Returns a value
in seconds.
:param int attempt: The attempt number
:rtype: float
"""
return (float(2 ** attempt) * 100) / 1000
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/client.py
|
Client.query
|
python
|
def query(self, table_name,
index_name=None,
consistent_read=None,
key_condition_expression=None,
filter_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
projection_expression=None,
select=None,
exclusive_start_key=None,
limit=None,
scan_index_forward=True,
return_consumed_capacity=None):
payload = {'TableName': table_name,
'ScanIndexForward': scan_index_forward}
if index_name:
payload['IndexName'] = index_name
if consistent_read is not None:
payload['ConsistentRead'] = consistent_read
if key_condition_expression:
payload['KeyConditionExpression'] = key_condition_expression
if filter_expression:
payload['FilterExpression'] = filter_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if projection_expression:
payload['ProjectionExpression'] = projection_expression
if select:
_validate_select(select)
payload['Select'] = select
if exclusive_start_key:
payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
if limit:
payload['Limit'] = limit
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
return self.execute('Query', payload)
|
A `Query`_ operation uses the primary key of a table or a secondary
index to directly access items from that table or index.
:param str table_name: The name of the table containing the requested
items.
:param bool consistent_read: Determines the read consistency model: If
set to ``True``, then the operation uses strongly consistent reads;
otherwise, the operation uses eventually consistent reads. Strongly
consistent reads are not supported on global secondary indexes. If
you query a global secondary index with ``consistent_read`` set to
``True``, you will receive a
:exc:`~sprockets_dynamodb.exceptions.ValidationException`.
:param dict exclusive_start_key: The primary key of the first
item that this operation will evaluate. Use the value that was
returned for ``LastEvaluatedKey`` in the previous operation. In a
parallel scan, a *Scan* request that includes
``exclusive_start_key`` must specify the same segment whose
previous *Scan* returned the corresponding value of
``LastEvaluatedKey``.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression.
:param str key_condition_expression: The condition that specifies the
key value(s) for items to be retrieved by the *Query* action. The
condition must perform an equality test on a single partition key
value, but can optionally perform one of several comparison tests
on a single sort key value. The partition key equality test is
required. For examples see `KeyConditionExpression
<https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/
Query.html#Query.KeyConditionExpressions>.
:param str filter_expression: A string that contains conditions that
DynamoDB applies after the *Query* operation, but before the data
is returned to you. Items that do not satisfy the criteria are not
returned. Note that a filter expression is applied after the items
have already been read; the process of filtering does not consume
any additional read capacity units. For more information, see
`Filter Expressions <http://docs.aws.amazon.com/amazondynamodb/
latest/developerguide/QueryAndScan.html#FilteringResults>`_ in the
Amazon DynamoDB Developer Guide.
:param str projection_expression:
:param str index_name: The name of a secondary index to query. This
index can be any local secondary index or global secondary index.
Note that if you use this parameter, you must also provide
``table_name``.
:param int limit: The maximum number of items to evaluate (not
necessarily the number of matching items). If DynamoDB processes
the number of items up to the limit while processing the results,
it stops the operation and returns the matching values up to that
point, and a key in ``LastEvaluatedKey`` to apply in a subsequent
operation, so that you can pick up where you left off. Also, if the
processed data set size exceeds 1 MB before DynamoDB reaches this
limit, it stops the operation and returns the matching values up to
the limit, and a key in ``LastEvaluatedKey`` to apply in a
subsequent operation to continue the operation. For more
information, see `Query and Scan <http://docs.aws.amazon.com/amazo
ndynamodb/latest/developerguide/QueryAndScan.html>`_ in the Amazon
DynamoDB Developer Guide.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response:
- ``INDEXES``: The response includes the aggregate consumed
capacity for the operation, together with consumed capacity for
each table and secondary index that was accessed. Note that
some operations, such as *GetItem* and *BatchGetItem*, do not
access any indexes at all. In these cases, specifying
``INDEXES`` will only return consumed capacity information for
table(s).
- ``TOTAL``: The response includes only the aggregate consumed
capacity for the operation.
- ``NONE``: No consumed capacity details are included in the
response.
:param bool scan_index_forward: Specifies the order for index
traversal: If ``True`` (default), the traversal is performed in
ascending order; if ``False``, the traversal is performed in
descending order. Items with the same partition key value are
stored in sorted order by sort key. If the sort key data type is
*Number*, the results are stored in numeric order. For type
*String*, the results are stored in order of ASCII character code
values. For type *Binary*, DynamoDB treats each byte of the binary
data as unsigned. If set to ``True``, DynamoDB returns the results
in the order in which they are stored (by sort key value). This is
the default behavior. If set to ``False``, DynamoDB reads the
results in reverse order by sort key value, and then returns the
results to the client.
:param str select: The attributes to be returned in the result. You can
retrieve all item attributes, specific item attributes, the count
of matching items, or in the case of an index, some or all of the
attributes projected into the index. Possible values are:
- ``ALL_ATTRIBUTES``: Returns all of the item attributes from the
specified table or index. If you query a local secondary index,
then for each matching item in the index DynamoDB will fetch
the entire item from the parent table. If the index is
configured to project all item attributes, then all of the data
can be obtained from the local secondary index, and no fetching
is required.
- ``ALL_PROJECTED_ATTRIBUTES``: Allowed only when querying an
index. Retrieves all attributes that have been projected into
the index. If the index is configured to project all
attributes, this return value is equivalent to specifying
``ALL_ATTRIBUTES``.
- ``COUNT``: Returns the number of matching items, rather than
the matching items themselves.
:rtype: dict
.. _Query: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_Query.html
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/client.py#L502-L653
|
[
"def marshall(values):\n \"\"\"\n Marshall a `dict` into something DynamoDB likes.\n\n :param dict values: The values to marshall\n :rtype: dict\n :raises ValueError: if an unsupported type is encountered\n\n Return the values in a nested dict structure that is required for\n writing the values to DynamoDB.\n\n \"\"\"\n serialized = {}\n for key in values:\n serialized[key] = _marshall_value(values[key])\n return serialized\n",
"def _validate_return_consumed_capacity(value):\n if value not in ['INDEXES', 'TOTAL', 'NONE']:\n raise ValueError('Invalid return_consumed_capacity value')\n",
"def _validate_select(value):\n if value not in ['ALL_ATTRIBUTES', 'ALL_PROJECTED_ATTRIBUTES', 'COUNT',\n 'SPECIFIC_ATTRIBUTES']:\n raise ValueError('Invalid select value')\n"
] |
class Client(object):
"""
Asynchronous DynamoDB Client
:keyword str region: AWS region to send requests to
:keyword str access_key: AWS access key. If unspecified, this
defaults to the :envvar:`AWS_ACCESS_KEY_ID` environment
variable and will fall back to using the AWS CLI credentials
file. See :class:`tornado_aws.client.AsyncAWSClient` for
more details.
:keyword str secret_key: AWS secret used to secure API calls.
If unspecified, this defaults to the :envvar:`AWS_SECRET_ACCESS_KEY`
environment variable and will fall back to using the AWS CLI
credentials as described in :class:`tornado_aws.client.AsyncAWSClient`.
:keyword str profile: optional profile to use in AWS API calls.
If unspecified, this defaults to the :envvar:`AWS_DEFAULT_PROFILE`
environment variable or ``default`` if unset.
:keyword str endpoint: DynamoDB endpoint to contact. If unspecified,
the default is determined by the region.
:keyword int max_clients: optional maximum number of HTTP requests
that may be performed in parallel.
:keyword int max_retries: Maximum number of times to retry a request when
if fails under certain conditions. Can also be set with the
:envvar:`DYNAMODB_MAX_RETRIES` environment variable.
:keyword method instrumentation_callback: A method that is invoked with a
list of measurements that were collected during the execution of an
individual action.
:keyword method on_error_callback: A method that is invoked when there is
a request exception that can not automatically be retried or the
maximum number of retries has been exceeded for a request.
Any of the methods invoked in the client can raise the following
exceptions:
- :exc:`sprockets_dynamodb.exceptions.DynamoDBException`
- :exc:`sprockets_dynamodb.exceptions.ConfigNotFound`
- :exc:`sprockets_dynamodb.exceptions.NoCredentialsError`
- :exc:`sprockets_dynamodb.exceptions.NoProfileError`
- :exc:`sprockets_dynamodb.exceptions.TimeoutException`
- :exc:`sprockets_dynamodb.exceptions.RequestException`
- :exc:`sprockets_dynamodb.exceptions.InternalFailure`
- :exc:`sprockets_dynamodb.exceptions.LimitExceeded`
- :exc:`sprockets_dynamodb.exceptions.MissingParameter`
- :exc:`sprockets_dynamodb.exceptions.OptInRequired`
- :exc:`sprockets_dynamodb.exceptions.ResourceInUse`
- :exc:`sprockets_dynamodb.exceptions.RequestExpired`
- :exc:`sprockets_dynamodb.exceptions.ServiceUnavailable`
- :exc:`sprockets_dynamodb.exceptions.ValidationException`
Create an instance of this class to interact with a DynamoDB
server. A :class:`tornado_aws.client.AsyncAWSClient` instance
implements the AWS API wrapping and this class provides the
DynamoDB specifics.
"""
DEFAULT_MAX_RETRIES = 3
def __init__(self, **kwargs):
self.logger = LOGGER.getChild(self.__class__.__name__)
if os.environ.get('DYNAMODB_ENDPOINT', None):
kwargs.setdefault('endpoint', os.environ['DYNAMODB_ENDPOINT'])
self._client = tornado_aws.AsyncAWSClient('dynamodb', **kwargs)
self._ioloop = kwargs.get('io_loop', ioloop.IOLoop.current())
self._max_retries = kwargs.get(
'max_retries', os.environ.get(
'DYNAMODB_MAX_RETRIES', self.DEFAULT_MAX_RETRIES))
self._instrumentation_callback = kwargs.get('instrumentation_callback')
self._on_error = kwargs.get('on_error_callback')
def create_table(self, table_definition):
"""
Invoke the ``CreateTable`` function.
:param dict table_definition: description of the table to
create according to `CreateTable`_
:rtype: tornado.concurrent.Future
.. _CreateTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_CreateTable.html
"""
return self.execute('CreateTable', table_definition)
def update_table(self, table_definition):
"""
Modifies the provisioned throughput settings, global secondary
indexes, or DynamoDB Streams settings for a given table.
You can only perform one of the following operations at once:
- Modify the provisioned throughput settings of the table.
- Enable or disable Streams on the table.
- Remove a global secondary index from the table.
- Create a new global secondary index on the table. Once the index
begins back-filling, you can use *UpdateTable* to perform other
operations.
*UpdateTable* is an asynchronous operation; while it is executing, the
table status changes from ``ACTIVE`` to ``UPDATING``. While it is
``UPDATING``, you cannot issue another *UpdateTable* request. When the
table returns to the ``ACTIVE`` state, the *UpdateTable* operation is
complete.
:param dict table_definition: description of the table to
update according to `UpdateTable`_
:rtype: tornado.concurrent.Future
.. _UpdateTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_UpdateTable.html
"""
raise NotImplementedError
def delete_table(self, table_name):
"""
Invoke the `DeleteTable`_ function. The DeleteTable operation deletes a
table and all of its items. After a DeleteTable request, the specified
table is in the DELETING state until DynamoDB completes the deletion.
If the table is in the ACTIVE state, you can delete it. If a table is
in CREATING or UPDATING states, then a
:py:exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
exception is raised. If the specified table does not exist, a
:exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
exception is raised. If table is already in the DELETING state, no
error is returned.
:param str table_name: name of the table to describe.
:rtype: tornado.concurrent.Future
.. _DeleteTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_DeleteTable.html
"""
return self.execute('DeleteTable', {'TableName': table_name})
def describe_table(self, table_name):
"""
Invoke the `DescribeTable`_ function.
:param str table_name: name of the table to describe.
:rtype: tornado.concurrent.Future
.. _DescribeTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_DescribeTable.html
"""
return self.execute('DescribeTable', {'TableName': table_name})
def list_tables(self, exclusive_start_table_name=None, limit=None):
"""
Invoke the `ListTables`_ function.
Returns an array of table names associated with the current account
and endpoint. The output from *ListTables* is paginated, with each page
returning a maximum of ``100`` table names.
:param str exclusive_start_table_name: The first table name that this
operation will evaluate. Use the value that was returned for
``LastEvaluatedTableName`` in a previous operation, so that you can
obtain the next page of results.
:param int limit: A maximum number of table names to return. If this
parameter is not specified, the limit is ``100``.
.. _ListTables: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_ListTables.html
"""
payload = {}
if exclusive_start_table_name:
payload['ExclusiveStartTableName'] = exclusive_start_table_name
if limit:
payload['Limit'] = limit
return self.execute('ListTables', payload)
def put_item(self, table_name, item,
condition_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
return_values=None):
"""Invoke the `PutItem`_ function, creating a new item, or replaces an
old item with a new item. If an item that has the same primary key as
the new item already exists in the specified table, the new item
completely replaces the existing item. You can perform a conditional
put operation (add a new item if one with the specified primary key
doesn't exist), or replace an existing item if it has certain attribute
values.
For more information about using this API, see Working with Items in
the Amazon DynamoDB Developer Guide.
:param str table_name: The table to put the item to
:param dict item: A map of attribute name/value pairs, one for each
attribute. Only the primary key attributes are required; you can
optionally provide other attribute name-value pairs for the item.
You must provide all of the attributes for the primary key. For
example, with a simple primary key, you only need to provide a
value for the partition key. For a composite primary key, you must
provide both values for both the partition key and the sort key.
If you specify any attributes that are part of an index key, then
the data types for those attributes must match those of the schema
in the table's attribute definition.
:param str condition_expression: A condition that must be satisfied in
order for a conditional *PutItem* operation to succeed. See the
`AWS documentation for ConditionExpression <http://docs.aws.amazon.
com/amazondynamodb/latest/APIReference/API_PutItem.html#DDB-Put
Item-request-ConditionExpression>`_ for more information.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression. See the `AWS documentation
for ExpressionAttributeNames <http://docs.aws.amazon.com/amazon
dynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-
ExpressionAttributeNames>`_ for more information.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression. See the `AWS documentation
for ExpressionAttributeValues <http://docs.aws.amazon.com/amazon
dynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-
ExpressionAttributeValues>`_ for more information.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response. Should be ``None`` or one of ``INDEXES`` or ``TOTAL``
:param str return_item_collection_metrics: Determines whether item
collection metrics are returned.
:param str return_values: Use ``ReturnValues`` if you want to get the
item attributes as they appeared before they were updated with the
``PutItem`` request.
:rtype: tornado.concurrent.Future
.. _PutItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_PutItem.html
"""
payload = {'TableName': table_name, 'Item': utils.marshall(item)}
if condition_expression:
payload['ConditionExpression'] = condition_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = expression_attribute_values
if return_consumed_capacity:
payload['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics:
payload['ReturnItemCollectionMetrics'] = 'SIZE'
if return_values:
_validate_return_values(return_values)
payload['ReturnValues'] = return_values
return self.execute('PutItem', payload)
def get_item(self, table_name, key_dict,
consistent_read=False,
expression_attribute_names=None,
projection_expression=None,
return_consumed_capacity=None):
"""
Invoke the `GetItem`_ function.
:param str table_name: table to retrieve the item from
:param dict key_dict: key to use for retrieval. This will
be marshalled for you so a native :class:`dict` works.
:param bool consistent_read: Determines the read consistency model: If
set to :py:data`True`, then the operation uses strongly consistent
reads; otherwise, the operation uses eventually consistent reads.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param str projection_expression: A string that identifies one or more
attributes to retrieve from the table. These attributes can include
scalars, sets, or elements of a JSON document. The attributes in
the expression must be separated by commas. If no attribute names
are specified, then all attributes will be returned. If any of the
requested attributes are not found, they will not appear in the
result.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response:
- INDEXES: The response includes the aggregate consumed
capacity for the operation, together with consumed capacity for
each table and secondary index that was accessed. Note that
some operations, such as *GetItem* and *BatchGetItem*, do not
access any indexes at all. In these cases, specifying INDEXES
will only return consumed capacity information for table(s).
- TOTAL: The response includes only the aggregate consumed
capacity for the operation.
- NONE: No consumed capacity details are included in the
response.
:rtype: tornado.concurrent.Future
.. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_GetItem.html
"""
payload = {'TableName': table_name,
'Key': utils.marshall(key_dict),
'ConsistentRead': consistent_read}
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if projection_expression:
payload['ProjectionExpression'] = projection_expression
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
return self.execute('GetItem', payload)
def update_item(self, table_name, key_dict,
condition_expression=None,
update_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
return_values=None):
"""Invoke the `UpdateItem`_ function.
Edits an existing item's attributes, or adds a new item to the table
if it does not already exist. You can put, delete, or add attribute
values. You can also perform a conditional update on an existing item
(insert a new attribute name-value pair if it doesn't exist, or replace
an existing name-value pair if it has certain expected attribute
values).
:param str table_name: The name of the table that contains the item to
update
:param dict key_dict: A dictionary of key/value pairs that are used to
define the primary key values for the item. For the primary key,
you must provide all of the attributes. For example, with a simple
primary key, you only need to provide a value for the partition
key. For a composite primary key, you must provide values for both
the partition key and the sort key.
:param str condition_expression: A condition that must be satisfied in
order for a conditional *UpdateItem* operation to succeed. One of:
``attribute_exists``, ``attribute_not_exists``, ``attribute_type``,
``contains``, ``begins_with``, ``size``, ``=``, ``<>``, ``<``,
``>``, ``<=``, ``>=``, ``BETWEEN``, ``IN``, ``AND``, ``OR``, or
``NOT``.
:param str update_expression: An expression that defines one or more
attributes to be updated, the action to be performed on them, and
new value(s) for them.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response. See the `AWS documentation
for ReturnConsumedCapacity <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_UpdateItem.html#DDB-Update
Item-request-ReturnConsumedCapacity>`_ for more information.
:param str return_item_collection_metrics: Determines whether item
collection metrics are returned.
:param str return_values: Use ReturnValues if you want to get the item
attributes as they appeared either before or after they were
updated. See the `AWS documentation for ReturnValues <http://docs.
aws.amazon.com/amazondynamodb/latest/APIReference/
API_UpdateItem.html#DDB-UpdateItem-request-ReturnValues>`_
:rtype: tornado.concurrent.Future
.. _UpdateItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_UpdateItem.html
"""
payload = {'TableName': table_name,
'Key': utils.marshall(key_dict),
'UpdateExpression': update_expression}
if condition_expression:
payload['ConditionExpression'] = condition_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics:
_validate_return_item_collection_metrics(
return_item_collection_metrics)
payload['ReturnItemCollectionMetrics'] = \
return_item_collection_metrics
if return_values:
_validate_return_values(return_values)
payload['ReturnValues'] = return_values
return self.execute('UpdateItem', payload)
def delete_item(self, table_name, key_dict,
condition_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
return_values=False):
"""Invoke the `DeleteItem`_ function that deletes a single item in a
table by primary key. You can perform a conditional delete operation
that deletes the item if it exists, or if it has an expected attribute
value.
:param str table_name: The name of the table from which to delete the
item.
:param dict key_dict: A map of attribute names to ``AttributeValue``
objects, representing the primary key of the item to delete. For
the primary key, you must provide all of the attributes. For
example, with a simple primary key, you only need to provide a
value for the partition key. For a composite primary key, you must
provide values for both the partition key and the sort key.
:param str condition_expression: A condition that must be satisfied in
order for a conditional *DeleteItem* to succeed. See the `AWS
documentation for ConditionExpression <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_DeleteItem.html#DDB-Delete
Item-request-ConditionExpression>`_ for more information.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression. See the `AWS documentation
for ExpressionAttributeNames <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_DeleteItem.html#DDB-Delete
Item-request-ExpressionAttributeNames>`_ for more information.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression. See the `AWS documentation
for ExpressionAttributeValues <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_DeleteItem.html#DDB-Delete
Item-request-ExpressionAttributeValues>`_ for more information.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response. See the `AWS documentation
for ReturnConsumedCapacity <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_DeleteItem.html#DDB-Delete
Item-request-ReturnConsumedCapacity>`_ for more information.
:param str return_item_collection_metrics: Determines whether item
collection metrics are returned.
:param str return_values: Return the item attributes as they appeared
before they were deleted.
.. _DeleteItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_DeleteItem.html
"""
payload = {'TableName': table_name, 'Key': utils.marshall(key_dict)}
if condition_expression:
payload['ConditionExpression'] = condition_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics:
_validate_return_item_collection_metrics(
return_item_collection_metrics)
payload['ReturnItemCollectionMetrics'] = \
return_item_collection_metrics
if return_values:
_validate_return_values(return_values)
payload['ReturnValues'] = return_values
return self.execute('DeleteItem', payload)
def batch_get_item(self):
"""Invoke the `BatchGetItem`_ function.
.. _BatchGetItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_BatchGetItem.html
"""
raise NotImplementedError
def batch_write_item(self):
"""Invoke the `BatchWriteItem`_ function.
.. _BatchWriteItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_BatchWriteItem.html
"""
raise NotImplementedError
def scan(self,
table_name,
index_name=None,
consistent_read=None,
projection_expression=None,
filter_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
segment=None,
total_segments=None,
select=None,
limit=None,
exclusive_start_key=None,
return_consumed_capacity=None):
"""The `Scan`_ operation returns one or more items and item attributes
by accessing every item in a table or a secondary index.
If the total number of scanned items exceeds the maximum data set size
limit of 1 MB, the scan stops and results are returned to the user as a
``LastEvaluatedKey`` value to continue the scan in a subsequent
operation. The results also include the number of items exceeding the
limit. A scan can result in no table data meeting the filter criteria.
By default, Scan operations proceed sequentially; however, for faster
performance on a large table or secondary index, applications can
request a parallel *Scan* operation by providing the ``segment`` and
``total_segments`` parameters. For more information, see
`Parallel Scan <http://docs.aws.amazon.com/amazondynamodb/latest/
developerguide/QueryAndScan.html#QueryAndScanParallelScan>`_ in the
Amazon DynamoDB Developer Guide.
By default, *Scan* uses eventually consistent reads when accessing the
data in a table; therefore, the result set might not include the
changes to data in the table immediately before the operation began. If
you need a consistent copy of the data, as of the time that the *Scan*
begins, you can set the ``consistent_read`` parameter to ``True``.
:rtype: dict
.. _Scan: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_Scan.html
"""
payload = {'TableName': table_name}
if index_name:
payload['IndexName'] = index_name
if consistent_read is not None:
payload['ConsistentRead'] = consistent_read
if filter_expression:
payload['FilterExpression'] = filter_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if projection_expression:
payload['ProjectionExpression'] = projection_expression
if segment:
payload['Segment'] = segment
if total_segments:
payload['TotalSegments'] = total_segments
if select:
_validate_select(select)
payload['Select'] = select
if exclusive_start_key:
payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
if limit:
payload['Limit'] = limit
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
return self.execute('Scan', payload)
@gen.coroutine
def execute(self, action, parameters):
"""
Execute a DynamoDB action with the given parameters. The method will
retry requests that failed due to OS level errors or when being
throttled by DynamoDB.
:param str action: DynamoDB action to invoke
:param dict parameters: parameters to send into the action
:rtype: tornado.concurrent.Future
This method creates a future that will resolve to the result
of calling the specified DynamoDB function. It does it's best
to unwrap the response from the function to make life a little
easier for you. It does this for the ``GetItem`` and ``Query``
functions currently.
:raises:
:exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
:exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
:exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
:exc:`~sprockets_dynamodb.exceptions.NoProfileError`
:exc:`~sprockets_dynamodb.exceptions.TimeoutException`
:exc:`~sprockets_dynamodb.exceptions.RequestException`
:exc:`~sprockets_dynamodb.exceptions.InternalFailure`
:exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
:exc:`~sprockets_dynamodb.exceptions.MissingParameter`
:exc:`~sprockets_dynamodb.exceptions.OptInRequired`
:exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
:exc:`~sprockets_dynamodb.exceptions.RequestExpired`
:exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
:exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
:exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
:exc:`~sprockets_dynamodb.exceptions.ValidationException`
"""
measurements = collections.deque([], self._max_retries)
for attempt in range(1, self._max_retries + 1):
try:
result = yield self._execute(
action, parameters, attempt, measurements)
except (exceptions.InternalServerError,
exceptions.RequestException,
exceptions.ThrottlingException,
exceptions.ThroughputExceeded,
exceptions.ServiceUnavailable) as error:
if attempt == self._max_retries:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self._on_exception(error)
duration = self._sleep_duration(attempt)
self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
error, attempt, duration)
yield gen.sleep(duration)
except exceptions.DynamoDBException as error:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self._on_exception(error)
else:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self.logger.debug('%s result: %r', action, result)
raise gen.Return(_unwrap_result(action, result))
def set_error_callback(self, callback):
"""Assign a method to invoke when a request has encountered an
unrecoverable error in an action execution.
:param method callback: The method to invoke
"""
self.logger.debug('Setting error callback: %r', callback)
self._on_error = callback
def set_instrumentation_callback(self, callback):
"""Assign a method to invoke when a request has completed gathering
measurements.
:param method callback: The method to invoke
"""
self.logger.debug('Setting instrumentation callback: %r', callback)
self._instrumentation_callback = callback
def _execute(self, action, parameters, attempt, measurements):
"""Invoke a DynamoDB action
:param str action: DynamoDB action to invoke
:param dict parameters: parameters to send into the action
:param int attempt: Which attempt number this is
:param list measurements: A list for accumulating request measurements
:rtype: tornado.concurrent.Future
"""
future = concurrent.Future()
start = time.time()
def handle_response(request):
"""Invoked by the IOLoop when fetch has a response to process.
:param tornado.concurrent.Future request: The request future
"""
self._on_response(
action, parameters.get('TableName', 'Unknown'), attempt,
start, request, future, measurements)
ioloop.IOLoop.current().add_future(self._client.fetch(
'POST', '/',
body=json.dumps(parameters).encode('utf-8'),
headers={
'x-amz-target': 'DynamoDB_20120810.{}'.format(action),
'Content-Type': 'application/x-amz-json-1.0',
}), handle_response)
return future
def _on_exception(self, error):
"""Handle exceptions that can not be retried.
:param error: The exception that was raised
:type error: sprockets_dynamodb.exceptions.DynamoDBException
"""
if not self._on_error:
raise error
self._on_error(error)
def _on_response(self, action, table, attempt, start, response, future,
measurements):
"""Invoked when the HTTP request to the DynamoDB has returned and
is responsible for setting the future result or exception based upon
the HTTP response provided.
:param str action: The action that was taken
:param str table: The table name the action was made against
:param int attempt: The attempt number for the action
:param float start: When the request was submitted
:param tornado.concurrent.Future response: The HTTP request future
:param tornado.concurrent.Future future: The action execution future
:param list measurements: The measurement accumulator
"""
self.logger.debug('%s on %s request #%i = %r',
action, table, attempt, response)
now, exception = time.time(), None
try:
future.set_result(self._process_response(response))
except aws_exceptions.ConfigNotFound as error:
exception = exceptions.ConfigNotFound(str(error))
except aws_exceptions.ConfigParserError as error:
exception = exceptions.ConfigParserError(str(error))
except aws_exceptions.NoCredentialsError as error:
exception = exceptions.NoCredentialsError(str(error))
except aws_exceptions.NoProfileError as error:
exception = exceptions.NoProfileError(str(error))
except aws_exceptions.AWSError as error:
exception = exceptions.DynamoDBException(error)
except (ConnectionError, ConnectionResetError, OSError,
aws_exceptions.RequestException, ssl.SSLError,
_select.error, ssl.socket_error, socket.gaierror) as error:
exception = exceptions.RequestException(str(error))
except TimeoutError:
exception = exceptions.TimeoutException()
except httpclient.HTTPError as error:
if error.code == 599:
exception = exceptions.TimeoutException()
else:
exception = exceptions.RequestException(
getattr(getattr(error, 'response', error),
'body', str(error.code)))
except Exception as error:
exception = error
if exception:
future.set_exception(exception)
measurements.append(
Measurement(now, action, table, attempt, max(now, start) - start,
exception.__class__.__name__
if exception else exception))
@staticmethod
def _process_response(response):
"""Process the raw AWS response, returning either the mapped exception
or deserialized response.
:param tornado.concurrent.Future response: The request future
:rtype: dict or list
:raises: sprockets_dynamodb.exceptions.DynamoDBException
"""
error = response.exception()
if error:
if isinstance(error, aws_exceptions.AWSError):
if error.args[1]['type'] in exceptions.MAP:
raise exceptions.MAP[error.args[1]['type']](
error.args[1]['message'])
raise error
http_response = response.result()
if not http_response or not http_response.body:
raise exceptions.DynamoDBException('empty response')
return json.loads(http_response.body.decode('utf-8'))
@staticmethod
def _sleep_duration(attempt):
"""Calculates how long to sleep between exceptions. Returns a value
in seconds.
:param int attempt: The attempt number
:rtype: float
"""
return (float(2 ** attempt) * 100) / 1000
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/client.py
|
Client.scan
|
python
|
def scan(self,
table_name,
index_name=None,
consistent_read=None,
projection_expression=None,
filter_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
segment=None,
total_segments=None,
select=None,
limit=None,
exclusive_start_key=None,
return_consumed_capacity=None):
payload = {'TableName': table_name}
if index_name:
payload['IndexName'] = index_name
if consistent_read is not None:
payload['ConsistentRead'] = consistent_read
if filter_expression:
payload['FilterExpression'] = filter_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if projection_expression:
payload['ProjectionExpression'] = projection_expression
if segment:
payload['Segment'] = segment
if total_segments:
payload['TotalSegments'] = total_segments
if select:
_validate_select(select)
payload['Select'] = select
if exclusive_start_key:
payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
if limit:
payload['Limit'] = limit
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
return self.execute('Scan', payload)
|
The `Scan`_ operation returns one or more items and item attributes
by accessing every item in a table or a secondary index.
If the total number of scanned items exceeds the maximum data set size
limit of 1 MB, the scan stops and results are returned to the user as a
``LastEvaluatedKey`` value to continue the scan in a subsequent
operation. The results also include the number of items exceeding the
limit. A scan can result in no table data meeting the filter criteria.
By default, Scan operations proceed sequentially; however, for faster
performance on a large table or secondary index, applications can
request a parallel *Scan* operation by providing the ``segment`` and
``total_segments`` parameters. For more information, see
`Parallel Scan <http://docs.aws.amazon.com/amazondynamodb/latest/
developerguide/QueryAndScan.html#QueryAndScanParallelScan>`_ in the
Amazon DynamoDB Developer Guide.
By default, *Scan* uses eventually consistent reads when accessing the
data in a table; therefore, the result set might not include the
changes to data in the table immediately before the operation began. If
you need a consistent copy of the data, as of the time that the *Scan*
begins, you can set the ``consistent_read`` parameter to ``True``.
:rtype: dict
.. _Scan: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_Scan.html
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/client.py#L655-L726
|
[
"def marshall(values):\n \"\"\"\n Marshall a `dict` into something DynamoDB likes.\n\n :param dict values: The values to marshall\n :rtype: dict\n :raises ValueError: if an unsupported type is encountered\n\n Return the values in a nested dict structure that is required for\n writing the values to DynamoDB.\n\n \"\"\"\n serialized = {}\n for key in values:\n serialized[key] = _marshall_value(values[key])\n return serialized\n",
"def _validate_return_consumed_capacity(value):\n if value not in ['INDEXES', 'TOTAL', 'NONE']:\n raise ValueError('Invalid return_consumed_capacity value')\n",
"def _validate_select(value):\n if value not in ['ALL_ATTRIBUTES', 'ALL_PROJECTED_ATTRIBUTES', 'COUNT',\n 'SPECIFIC_ATTRIBUTES']:\n raise ValueError('Invalid select value')\n"
] |
class Client(object):
"""
Asynchronous DynamoDB Client
:keyword str region: AWS region to send requests to
:keyword str access_key: AWS access key. If unspecified, this
defaults to the :envvar:`AWS_ACCESS_KEY_ID` environment
variable and will fall back to using the AWS CLI credentials
file. See :class:`tornado_aws.client.AsyncAWSClient` for
more details.
:keyword str secret_key: AWS secret used to secure API calls.
If unspecified, this defaults to the :envvar:`AWS_SECRET_ACCESS_KEY`
environment variable and will fall back to using the AWS CLI
credentials as described in :class:`tornado_aws.client.AsyncAWSClient`.
:keyword str profile: optional profile to use in AWS API calls.
If unspecified, this defaults to the :envvar:`AWS_DEFAULT_PROFILE`
environment variable or ``default`` if unset.
:keyword str endpoint: DynamoDB endpoint to contact. If unspecified,
the default is determined by the region.
:keyword int max_clients: optional maximum number of HTTP requests
that may be performed in parallel.
:keyword int max_retries: Maximum number of times to retry a request when
if fails under certain conditions. Can also be set with the
:envvar:`DYNAMODB_MAX_RETRIES` environment variable.
:keyword method instrumentation_callback: A method that is invoked with a
list of measurements that were collected during the execution of an
individual action.
:keyword method on_error_callback: A method that is invoked when there is
a request exception that can not automatically be retried or the
maximum number of retries has been exceeded for a request.
Any of the methods invoked in the client can raise the following
exceptions:
- :exc:`sprockets_dynamodb.exceptions.DynamoDBException`
- :exc:`sprockets_dynamodb.exceptions.ConfigNotFound`
- :exc:`sprockets_dynamodb.exceptions.NoCredentialsError`
- :exc:`sprockets_dynamodb.exceptions.NoProfileError`
- :exc:`sprockets_dynamodb.exceptions.TimeoutException`
- :exc:`sprockets_dynamodb.exceptions.RequestException`
- :exc:`sprockets_dynamodb.exceptions.InternalFailure`
- :exc:`sprockets_dynamodb.exceptions.LimitExceeded`
- :exc:`sprockets_dynamodb.exceptions.MissingParameter`
- :exc:`sprockets_dynamodb.exceptions.OptInRequired`
- :exc:`sprockets_dynamodb.exceptions.ResourceInUse`
- :exc:`sprockets_dynamodb.exceptions.RequestExpired`
- :exc:`sprockets_dynamodb.exceptions.ServiceUnavailable`
- :exc:`sprockets_dynamodb.exceptions.ValidationException`
Create an instance of this class to interact with a DynamoDB
server. A :class:`tornado_aws.client.AsyncAWSClient` instance
implements the AWS API wrapping and this class provides the
DynamoDB specifics.
"""
DEFAULT_MAX_RETRIES = 3
def __init__(self, **kwargs):
self.logger = LOGGER.getChild(self.__class__.__name__)
if os.environ.get('DYNAMODB_ENDPOINT', None):
kwargs.setdefault('endpoint', os.environ['DYNAMODB_ENDPOINT'])
self._client = tornado_aws.AsyncAWSClient('dynamodb', **kwargs)
self._ioloop = kwargs.get('io_loop', ioloop.IOLoop.current())
self._max_retries = kwargs.get(
'max_retries', os.environ.get(
'DYNAMODB_MAX_RETRIES', self.DEFAULT_MAX_RETRIES))
self._instrumentation_callback = kwargs.get('instrumentation_callback')
self._on_error = kwargs.get('on_error_callback')
def create_table(self, table_definition):
"""
Invoke the ``CreateTable`` function.
:param dict table_definition: description of the table to
create according to `CreateTable`_
:rtype: tornado.concurrent.Future
.. _CreateTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_CreateTable.html
"""
return self.execute('CreateTable', table_definition)
def update_table(self, table_definition):
"""
Modifies the provisioned throughput settings, global secondary
indexes, or DynamoDB Streams settings for a given table.
You can only perform one of the following operations at once:
- Modify the provisioned throughput settings of the table.
- Enable or disable Streams on the table.
- Remove a global secondary index from the table.
- Create a new global secondary index on the table. Once the index
begins back-filling, you can use *UpdateTable* to perform other
operations.
*UpdateTable* is an asynchronous operation; while it is executing, the
table status changes from ``ACTIVE`` to ``UPDATING``. While it is
``UPDATING``, you cannot issue another *UpdateTable* request. When the
table returns to the ``ACTIVE`` state, the *UpdateTable* operation is
complete.
:param dict table_definition: description of the table to
update according to `UpdateTable`_
:rtype: tornado.concurrent.Future
.. _UpdateTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_UpdateTable.html
"""
raise NotImplementedError
def delete_table(self, table_name):
"""
Invoke the `DeleteTable`_ function. The DeleteTable operation deletes a
table and all of its items. After a DeleteTable request, the specified
table is in the DELETING state until DynamoDB completes the deletion.
If the table is in the ACTIVE state, you can delete it. If a table is
in CREATING or UPDATING states, then a
:py:exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
exception is raised. If the specified table does not exist, a
:exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
exception is raised. If table is already in the DELETING state, no
error is returned.
:param str table_name: name of the table to describe.
:rtype: tornado.concurrent.Future
.. _DeleteTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_DeleteTable.html
"""
return self.execute('DeleteTable', {'TableName': table_name})
def describe_table(self, table_name):
"""
Invoke the `DescribeTable`_ function.
:param str table_name: name of the table to describe.
:rtype: tornado.concurrent.Future
.. _DescribeTable: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_DescribeTable.html
"""
return self.execute('DescribeTable', {'TableName': table_name})
def list_tables(self, exclusive_start_table_name=None, limit=None):
"""
Invoke the `ListTables`_ function.
Returns an array of table names associated with the current account
and endpoint. The output from *ListTables* is paginated, with each page
returning a maximum of ``100`` table names.
:param str exclusive_start_table_name: The first table name that this
operation will evaluate. Use the value that was returned for
``LastEvaluatedTableName`` in a previous operation, so that you can
obtain the next page of results.
:param int limit: A maximum number of table names to return. If this
parameter is not specified, the limit is ``100``.
.. _ListTables: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_ListTables.html
"""
payload = {}
if exclusive_start_table_name:
payload['ExclusiveStartTableName'] = exclusive_start_table_name
if limit:
payload['Limit'] = limit
return self.execute('ListTables', payload)
def put_item(self, table_name, item,
condition_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
return_values=None):
"""Invoke the `PutItem`_ function, creating a new item, or replaces an
old item with a new item. If an item that has the same primary key as
the new item already exists in the specified table, the new item
completely replaces the existing item. You can perform a conditional
put operation (add a new item if one with the specified primary key
doesn't exist), or replace an existing item if it has certain attribute
values.
For more information about using this API, see Working with Items in
the Amazon DynamoDB Developer Guide.
:param str table_name: The table to put the item to
:param dict item: A map of attribute name/value pairs, one for each
attribute. Only the primary key attributes are required; you can
optionally provide other attribute name-value pairs for the item.
You must provide all of the attributes for the primary key. For
example, with a simple primary key, you only need to provide a
value for the partition key. For a composite primary key, you must
provide both values for both the partition key and the sort key.
If you specify any attributes that are part of an index key, then
the data types for those attributes must match those of the schema
in the table's attribute definition.
:param str condition_expression: A condition that must be satisfied in
order for a conditional *PutItem* operation to succeed. See the
`AWS documentation for ConditionExpression <http://docs.aws.amazon.
com/amazondynamodb/latest/APIReference/API_PutItem.html#DDB-Put
Item-request-ConditionExpression>`_ for more information.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression. See the `AWS documentation
for ExpressionAttributeNames <http://docs.aws.amazon.com/amazon
dynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-
ExpressionAttributeNames>`_ for more information.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression. See the `AWS documentation
for ExpressionAttributeValues <http://docs.aws.amazon.com/amazon
dynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-
ExpressionAttributeValues>`_ for more information.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response. Should be ``None`` or one of ``INDEXES`` or ``TOTAL``
:param str return_item_collection_metrics: Determines whether item
collection metrics are returned.
:param str return_values: Use ``ReturnValues`` if you want to get the
item attributes as they appeared before they were updated with the
``PutItem`` request.
:rtype: tornado.concurrent.Future
.. _PutItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_PutItem.html
"""
payload = {'TableName': table_name, 'Item': utils.marshall(item)}
if condition_expression:
payload['ConditionExpression'] = condition_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = expression_attribute_values
if return_consumed_capacity:
payload['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics:
payload['ReturnItemCollectionMetrics'] = 'SIZE'
if return_values:
_validate_return_values(return_values)
payload['ReturnValues'] = return_values
return self.execute('PutItem', payload)
def get_item(self, table_name, key_dict,
consistent_read=False,
expression_attribute_names=None,
projection_expression=None,
return_consumed_capacity=None):
"""
Invoke the `GetItem`_ function.
:param str table_name: table to retrieve the item from
:param dict key_dict: key to use for retrieval. This will
be marshalled for you so a native :class:`dict` works.
:param bool consistent_read: Determines the read consistency model: If
set to :py:data`True`, then the operation uses strongly consistent
reads; otherwise, the operation uses eventually consistent reads.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param str projection_expression: A string that identifies one or more
attributes to retrieve from the table. These attributes can include
scalars, sets, or elements of a JSON document. The attributes in
the expression must be separated by commas. If no attribute names
are specified, then all attributes will be returned. If any of the
requested attributes are not found, they will not appear in the
result.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response:
- INDEXES: The response includes the aggregate consumed
capacity for the operation, together with consumed capacity for
each table and secondary index that was accessed. Note that
some operations, such as *GetItem* and *BatchGetItem*, do not
access any indexes at all. In these cases, specifying INDEXES
will only return consumed capacity information for table(s).
- TOTAL: The response includes only the aggregate consumed
capacity for the operation.
- NONE: No consumed capacity details are included in the
response.
:rtype: tornado.concurrent.Future
.. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_GetItem.html
"""
payload = {'TableName': table_name,
'Key': utils.marshall(key_dict),
'ConsistentRead': consistent_read}
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if projection_expression:
payload['ProjectionExpression'] = projection_expression
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
return self.execute('GetItem', payload)
def update_item(self, table_name, key_dict,
condition_expression=None,
update_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
return_values=None):
"""Invoke the `UpdateItem`_ function.
Edits an existing item's attributes, or adds a new item to the table
if it does not already exist. You can put, delete, or add attribute
values. You can also perform a conditional update on an existing item
(insert a new attribute name-value pair if it doesn't exist, or replace
an existing name-value pair if it has certain expected attribute
values).
:param str table_name: The name of the table that contains the item to
update
:param dict key_dict: A dictionary of key/value pairs that are used to
define the primary key values for the item. For the primary key,
you must provide all of the attributes. For example, with a simple
primary key, you only need to provide a value for the partition
key. For a composite primary key, you must provide values for both
the partition key and the sort key.
:param str condition_expression: A condition that must be satisfied in
order for a conditional *UpdateItem* operation to succeed. One of:
``attribute_exists``, ``attribute_not_exists``, ``attribute_type``,
``contains``, ``begins_with``, ``size``, ``=``, ``<>``, ``<``,
``>``, ``<=``, ``>=``, ``BETWEEN``, ``IN``, ``AND``, ``OR``, or
``NOT``.
:param str update_expression: An expression that defines one or more
attributes to be updated, the action to be performed on them, and
new value(s) for them.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response. See the `AWS documentation
for ReturnConsumedCapacity <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_UpdateItem.html#DDB-Update
Item-request-ReturnConsumedCapacity>`_ for more information.
:param str return_item_collection_metrics: Determines whether item
collection metrics are returned.
:param str return_values: Use ReturnValues if you want to get the item
attributes as they appeared either before or after they were
updated. See the `AWS documentation for ReturnValues <http://docs.
aws.amazon.com/amazondynamodb/latest/APIReference/
API_UpdateItem.html#DDB-UpdateItem-request-ReturnValues>`_
:rtype: tornado.concurrent.Future
.. _UpdateItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_UpdateItem.html
"""
payload = {'TableName': table_name,
'Key': utils.marshall(key_dict),
'UpdateExpression': update_expression}
if condition_expression:
payload['ConditionExpression'] = condition_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics:
_validate_return_item_collection_metrics(
return_item_collection_metrics)
payload['ReturnItemCollectionMetrics'] = \
return_item_collection_metrics
if return_values:
_validate_return_values(return_values)
payload['ReturnValues'] = return_values
return self.execute('UpdateItem', payload)
def delete_item(self, table_name, key_dict,
condition_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
return_values=False):
"""Invoke the `DeleteItem`_ function that deletes a single item in a
table by primary key. You can perform a conditional delete operation
that deletes the item if it exists, or if it has an expected attribute
value.
:param str table_name: The name of the table from which to delete the
item.
:param dict key_dict: A map of attribute names to ``AttributeValue``
objects, representing the primary key of the item to delete. For
the primary key, you must provide all of the attributes. For
example, with a simple primary key, you only need to provide a
value for the partition key. For a composite primary key, you must
provide values for both the partition key and the sort key.
:param str condition_expression: A condition that must be satisfied in
order for a conditional *DeleteItem* to succeed. See the `AWS
documentation for ConditionExpression <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_DeleteItem.html#DDB-Delete
Item-request-ConditionExpression>`_ for more information.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression. See the `AWS documentation
for ExpressionAttributeNames <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_DeleteItem.html#DDB-Delete
Item-request-ExpressionAttributeNames>`_ for more information.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression. See the `AWS documentation
for ExpressionAttributeValues <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_DeleteItem.html#DDB-Delete
Item-request-ExpressionAttributeValues>`_ for more information.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response. See the `AWS documentation
for ReturnConsumedCapacity <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_DeleteItem.html#DDB-Delete
Item-request-ReturnConsumedCapacity>`_ for more information.
:param str return_item_collection_metrics: Determines whether item
collection metrics are returned.
:param str return_values: Return the item attributes as they appeared
before they were deleted.
.. _DeleteItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_DeleteItem.html
"""
payload = {'TableName': table_name, 'Key': utils.marshall(key_dict)}
if condition_expression:
payload['ConditionExpression'] = condition_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics:
_validate_return_item_collection_metrics(
return_item_collection_metrics)
payload['ReturnItemCollectionMetrics'] = \
return_item_collection_metrics
if return_values:
_validate_return_values(return_values)
payload['ReturnValues'] = return_values
return self.execute('DeleteItem', payload)
def batch_get_item(self):
"""Invoke the `BatchGetItem`_ function.
.. _BatchGetItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_BatchGetItem.html
"""
raise NotImplementedError
def batch_write_item(self):
"""Invoke the `BatchWriteItem`_ function.
.. _BatchWriteItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_BatchWriteItem.html
"""
raise NotImplementedError
def query(self, table_name,
index_name=None,
consistent_read=None,
key_condition_expression=None,
filter_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
projection_expression=None,
select=None,
exclusive_start_key=None,
limit=None,
scan_index_forward=True,
return_consumed_capacity=None):
"""A `Query`_ operation uses the primary key of a table or a secondary
index to directly access items from that table or index.
:param str table_name: The name of the table containing the requested
items.
:param bool consistent_read: Determines the read consistency model: If
set to ``True``, then the operation uses strongly consistent reads;
otherwise, the operation uses eventually consistent reads. Strongly
consistent reads are not supported on global secondary indexes. If
you query a global secondary index with ``consistent_read`` set to
``True``, you will receive a
:exc:`~sprockets_dynamodb.exceptions.ValidationException`.
:param dict exclusive_start_key: The primary key of the first
item that this operation will evaluate. Use the value that was
returned for ``LastEvaluatedKey`` in the previous operation. In a
parallel scan, a *Scan* request that includes
``exclusive_start_key`` must specify the same segment whose
previous *Scan* returned the corresponding value of
``LastEvaluatedKey``.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression.
:param str key_condition_expression: The condition that specifies the
key value(s) for items to be retrieved by the *Query* action. The
condition must perform an equality test on a single partition key
value, but can optionally perform one of several comparison tests
on a single sort key value. The partition key equality test is
required. For examples see `KeyConditionExpression
<https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/
Query.html#Query.KeyConditionExpressions>.
:param str filter_expression: A string that contains conditions that
DynamoDB applies after the *Query* operation, but before the data
is returned to you. Items that do not satisfy the criteria are not
returned. Note that a filter expression is applied after the items
have already been read; the process of filtering does not consume
any additional read capacity units. For more information, see
`Filter Expressions <http://docs.aws.amazon.com/amazondynamodb/
latest/developerguide/QueryAndScan.html#FilteringResults>`_ in the
Amazon DynamoDB Developer Guide.
:param str projection_expression:
:param str index_name: The name of a secondary index to query. This
index can be any local secondary index or global secondary index.
Note that if you use this parameter, you must also provide
``table_name``.
:param int limit: The maximum number of items to evaluate (not
necessarily the number of matching items). If DynamoDB processes
the number of items up to the limit while processing the results,
it stops the operation and returns the matching values up to that
point, and a key in ``LastEvaluatedKey`` to apply in a subsequent
operation, so that you can pick up where you left off. Also, if the
processed data set size exceeds 1 MB before DynamoDB reaches this
limit, it stops the operation and returns the matching values up to
the limit, and a key in ``LastEvaluatedKey`` to apply in a
subsequent operation to continue the operation. For more
information, see `Query and Scan <http://docs.aws.amazon.com/amazo
ndynamodb/latest/developerguide/QueryAndScan.html>`_ in the Amazon
DynamoDB Developer Guide.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response:
- ``INDEXES``: The response includes the aggregate consumed
capacity for the operation, together with consumed capacity for
each table and secondary index that was accessed. Note that
some operations, such as *GetItem* and *BatchGetItem*, do not
access any indexes at all. In these cases, specifying
``INDEXES`` will only return consumed capacity information for
table(s).
- ``TOTAL``: The response includes only the aggregate consumed
capacity for the operation.
- ``NONE``: No consumed capacity details are included in the
response.
:param bool scan_index_forward: Specifies the order for index
traversal: If ``True`` (default), the traversal is performed in
ascending order; if ``False``, the traversal is performed in
descending order. Items with the same partition key value are
stored in sorted order by sort key. If the sort key data type is
*Number*, the results are stored in numeric order. For type
*String*, the results are stored in order of ASCII character code
values. For type *Binary*, DynamoDB treats each byte of the binary
data as unsigned. If set to ``True``, DynamoDB returns the results
in the order in which they are stored (by sort key value). This is
the default behavior. If set to ``False``, DynamoDB reads the
results in reverse order by sort key value, and then returns the
results to the client.
:param str select: The attributes to be returned in the result. You can
retrieve all item attributes, specific item attributes, the count
of matching items, or in the case of an index, some or all of the
attributes projected into the index. Possible values are:
- ``ALL_ATTRIBUTES``: Returns all of the item attributes from the
specified table or index. If you query a local secondary index,
then for each matching item in the index DynamoDB will fetch
the entire item from the parent table. If the index is
configured to project all item attributes, then all of the data
can be obtained from the local secondary index, and no fetching
is required.
- ``ALL_PROJECTED_ATTRIBUTES``: Allowed only when querying an
index. Retrieves all attributes that have been projected into
the index. If the index is configured to project all
attributes, this return value is equivalent to specifying
``ALL_ATTRIBUTES``.
- ``COUNT``: Returns the number of matching items, rather than
the matching items themselves.
:rtype: dict
.. _Query: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_Query.html
"""
payload = {'TableName': table_name,
'ScanIndexForward': scan_index_forward}
if index_name:
payload['IndexName'] = index_name
if consistent_read is not None:
payload['ConsistentRead'] = consistent_read
if key_condition_expression:
payload['KeyConditionExpression'] = key_condition_expression
if filter_expression:
payload['FilterExpression'] = filter_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if projection_expression:
payload['ProjectionExpression'] = projection_expression
if select:
_validate_select(select)
payload['Select'] = select
if exclusive_start_key:
payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
if limit:
payload['Limit'] = limit
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
return self.execute('Query', payload)
    @gen.coroutine
    def execute(self, action, parameters):
        """
        Execute a DynamoDB action with the given parameters. The method will
        retry requests that failed due to OS level errors or when being
        throttled by DynamoDB.

        :param str action: DynamoDB action to invoke
        :param dict parameters: parameters to send into the action
        :rtype: tornado.concurrent.Future

        This method creates a future that will resolve to the result
        of calling the specified DynamoDB function. It does it's best
        to unwrap the response from the function to make life a little
        easier for you. It does this for the ``GetItem`` and ``Query``
        functions currently.

        :raises:
            :exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
            :exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
            :exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
            :exc:`~sprockets_dynamodb.exceptions.NoProfileError`
            :exc:`~sprockets_dynamodb.exceptions.TimeoutException`
            :exc:`~sprockets_dynamodb.exceptions.RequestException`
            :exc:`~sprockets_dynamodb.exceptions.InternalFailure`
            :exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
            :exc:`~sprockets_dynamodb.exceptions.MissingParameter`
            :exc:`~sprockets_dynamodb.exceptions.OptInRequired`
            :exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
            :exc:`~sprockets_dynamodb.exceptions.RequestExpired`
            :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
            :exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
            :exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
            :exc:`~sprockets_dynamodb.exceptions.ValidationException`
        """
        # Bounded accumulator: keeps at most one Measurement per attempt
        measurements = collections.deque([], self._max_retries)
        for attempt in range(1, self._max_retries + 1):
            try:
                result = yield self._execute(
                    action, parameters, attempt, measurements)
            except (exceptions.InternalServerError,
                    exceptions.RequestException,
                    exceptions.ThrottlingException,
                    exceptions.ThroughputExceeded,
                    exceptions.ServiceUnavailable) as error:
                # Transient/retryable failures: back off and retry unless
                # this was the final attempt.
                if attempt == self._max_retries:
                    # Flush measurements before surfacing the final error.
                    # NOTE(review): _on_exception only raises when no
                    # on_error callback is set; with a callback configured
                    # the loop falls through to the sleep below and the
                    # coroutine eventually resolves to None — presumably
                    # intentional, confirm with callers.
                    if self._instrumentation_callback:
                        self._instrumentation_callback(measurements)
                    self._on_exception(error)
                duration = self._sleep_duration(attempt)
                self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
                                    error, attempt, duration)
                yield gen.sleep(duration)
            except exceptions.DynamoDBException as error:
                # Non-retryable DynamoDB error: report immediately
                if self._instrumentation_callback:
                    self._instrumentation_callback(measurements)
                self._on_exception(error)
            else:
                # Success: report measurements and resolve the coroutine
                # with the (possibly unwrapped) result.
                if self._instrumentation_callback:
                    self._instrumentation_callback(measurements)
                self.logger.debug('%s result: %r', action, result)
                raise gen.Return(_unwrap_result(action, result))
def set_error_callback(self, callback):
"""Assign a method to invoke when a request has encountered an
unrecoverable error in an action execution.
:param method callback: The method to invoke
"""
self.logger.debug('Setting error callback: %r', callback)
self._on_error = callback
def set_instrumentation_callback(self, callback):
"""Assign a method to invoke when a request has completed gathering
measurements.
:param method callback: The method to invoke
"""
self.logger.debug('Setting instrumentation callback: %r', callback)
self._instrumentation_callback = callback
    def _execute(self, action, parameters, attempt, measurements):
        """Invoke a DynamoDB action

        Issues a single HTTP POST for *action* and returns a future that
        ``_on_response`` resolves (or fails) once the HTTP request
        completes.

        :param str action: DynamoDB action to invoke
        :param dict parameters: parameters to send into the action
        :param int attempt: Which attempt number this is
        :param list measurements: A list for accumulating request measurements
        :rtype: tornado.concurrent.Future

        """
        future = concurrent.Future()
        # Capture the start time now so _on_response can measure duration
        start = time.time()

        def handle_response(request):
            """Invoked by the IOLoop when fetch has a response to process.

            :param tornado.concurrent.Future request: The request future

            """
            # Delegate response parsing / error mapping to _on_response,
            # which sets the result or exception on ``future``.
            self._on_response(
                action, parameters.get('TableName', 'Unknown'), attempt,
                start, request, future, measurements)

        # Fire the request and have the IOLoop call handle_response when
        # the fetch future completes. The x-amz-target header selects the
        # DynamoDB API operation.
        ioloop.IOLoop.current().add_future(self._client.fetch(
            'POST', '/',
            body=json.dumps(parameters).encode('utf-8'),
            headers={
                'x-amz-target': 'DynamoDB_20120810.{}'.format(action),
                'Content-Type': 'application/x-amz-json-1.0',
            }), handle_response)
        return future
def _on_exception(self, error):
"""Handle exceptions that can not be retried.
:param error: The exception that was raised
:type error: sprockets_dynamodb.exceptions.DynamoDBException
"""
if not self._on_error:
raise error
self._on_error(error)
def _on_response(self, action, table, attempt, start, response, future,
                 measurements):
    """Invoked when the HTTP request to the DynamoDB has returned and
    is responsible for setting the future result or exception based upon
    the HTTP response provided.

    :param str action: The action that was taken
    :param str table: The table name the action was made against
    :param int attempt: The attempt number for the action
    :param float start: When the request was submitted
    :param tornado.concurrent.Future response: The HTTP request future
    :param tornado.concurrent.Future future: The action execution future
    :param list measurements: The measurement accumulator

    """
    self.logger.debug('%s on %s request #%i = %r',
                      action, table, attempt, response)
    now, exception = time.time(), None
    try:
        future.set_result(self._process_response(response))
    # The more specific tornado_aws exceptions are translated first,
    # before the generic AWSError clause below, so their targeted
    # package-level counterparts are raised to callers.
    except aws_exceptions.ConfigNotFound as error:
        exception = exceptions.ConfigNotFound(str(error))
    except aws_exceptions.ConfigParserError as error:
        exception = exceptions.ConfigParserError(str(error))
    except aws_exceptions.NoCredentialsError as error:
        exception = exceptions.NoCredentialsError(str(error))
    except aws_exceptions.NoProfileError as error:
        exception = exceptions.NoProfileError(str(error))
    except aws_exceptions.AWSError as error:
        exception = exceptions.DynamoDBException(error)
    # Socket/SSL-level failures become RequestException, which execute()
    # treats as retryable.
    except (ConnectionError, ConnectionResetError, OSError,
            aws_exceptions.RequestException, ssl.SSLError,
            _select.error, ssl.socket_error, socket.gaierror) as error:
        exception = exceptions.RequestException(str(error))
    except TimeoutError:
        exception = exceptions.TimeoutException()
    except httpclient.HTTPError as error:
        if error.code == 599:
            # Tornado reports connect/request timeouts as HTTP 599
            exception = exceptions.TimeoutException()
        else:
            exception = exceptions.RequestException(
                getattr(getattr(error, 'response', error),
                        'body', str(error.code)))
    except Exception as error:
        exception = error
    if exception:
        future.set_exception(exception)
    # Record this attempt.  The last Measurement field is the exception
    # class name on failure or None on success; max(now, start) guards
    # against a negative duration if the clock moved backwards.
    measurements.append(
        Measurement(now, action, table, attempt, max(now, start) - start,
                    exception.__class__.__name__
                    if exception else exception))
@staticmethod
def _process_response(response):
    """Process the raw AWS response, returning either the mapped exception
    or deserialized response.

    :param tornado.concurrent.Future response: The request future
    :rtype: dict or list
    :raises: sprockets_dynamodb.exceptions.DynamoDBException

    """
    error = response.exception()
    if error:
        if isinstance(error, aws_exceptions.AWSError):
            # error.args[1] holds the parsed AWS error payload (as used
            # here: a dict with 'type' and 'message').  Translate known
            # AWS error types to this package's exception classes,
            # otherwise fall through and re-raise the original error.
            if error.args[1]['type'] in exceptions.MAP:
                raise exceptions.MAP[error.args[1]['type']](
                    error.args[1]['message'])
        raise error
    http_response = response.result()
    if not http_response or not http_response.body:
        raise exceptions.DynamoDBException('empty response')
    return json.loads(http_response.body.decode('utf-8'))
@staticmethod
def _sleep_duration(attempt):
"""Calculates how long to sleep between exceptions. Returns a value
in seconds.
:param int attempt: The attempt number
:rtype: float
"""
return (float(2 ** attempt) * 100) / 1000
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/client.py
|
Client.execute
|
python
|
def execute(self, action, parameters):
measurements = collections.deque([], self._max_retries)
for attempt in range(1, self._max_retries + 1):
try:
result = yield self._execute(
action, parameters, attempt, measurements)
except (exceptions.InternalServerError,
exceptions.RequestException,
exceptions.ThrottlingException,
exceptions.ThroughputExceeded,
exceptions.ServiceUnavailable) as error:
if attempt == self._max_retries:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self._on_exception(error)
duration = self._sleep_duration(attempt)
self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
error, attempt, duration)
yield gen.sleep(duration)
except exceptions.DynamoDBException as error:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self._on_exception(error)
else:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self.logger.debug('%s result: %r', action, result)
raise gen.Return(_unwrap_result(action, result))
|
Execute a DynamoDB action with the given parameters. The method will
retry requests that failed due to OS level errors or when being
throttled by DynamoDB.
:param str action: DynamoDB action to invoke
:param dict parameters: parameters to send into the action
:rtype: tornado.concurrent.Future
This method creates a future that will resolve to the result
of calling the specified DynamoDB function. It does it's best
to unwrap the response from the function to make life a little
easier for you. It does this for the ``GetItem`` and ``Query``
functions currently.
:raises:
:exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
:exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
:exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
:exc:`~sprockets_dynamodb.exceptions.NoProfileError`
:exc:`~sprockets_dynamodb.exceptions.TimeoutException`
:exc:`~sprockets_dynamodb.exceptions.RequestException`
:exc:`~sprockets_dynamodb.exceptions.InternalFailure`
:exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
:exc:`~sprockets_dynamodb.exceptions.MissingParameter`
:exc:`~sprockets_dynamodb.exceptions.OptInRequired`
:exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
:exc:`~sprockets_dynamodb.exceptions.RequestExpired`
:exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
:exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
:exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
:exc:`~sprockets_dynamodb.exceptions.ValidationException`
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/client.py#L729-L790
|
[
"def _unwrap_result(action, result):\n \"\"\"Unwrap a request response and return only the response data.\n\n :param str action: The action name\n :param result: The result of the action\n :type: result: list or dict\n :rtype: dict | None\n\n \"\"\"\n if not result:\n return\n elif action in {'DeleteItem', 'PutItem', 'UpdateItem'}:\n return _unwrap_delete_put_update_item(result)\n elif action == 'GetItem':\n return _unwrap_get_item(result)\n elif action == 'Query' or action == 'Scan':\n return _unwrap_query_scan(result)\n elif action == 'CreateTable':\n return _unwrap_create_table(result)\n elif action == 'DescribeTable':\n return _unwrap_describe_table(result)\n return result\n",
"def _execute(self, action, parameters, attempt, measurements):\n \"\"\"Invoke a DynamoDB action\n\n :param str action: DynamoDB action to invoke\n :param dict parameters: parameters to send into the action\n :param int attempt: Which attempt number this is\n :param list measurements: A list for accumulating request measurements\n :rtype: tornado.concurrent.Future\n\n \"\"\"\n future = concurrent.Future()\n start = time.time()\n\n def handle_response(request):\n \"\"\"Invoked by the IOLoop when fetch has a response to process.\n\n :param tornado.concurrent.Future request: The request future\n\n \"\"\"\n self._on_response(\n action, parameters.get('TableName', 'Unknown'), attempt,\n start, request, future, measurements)\n\n ioloop.IOLoop.current().add_future(self._client.fetch(\n 'POST', '/',\n body=json.dumps(parameters).encode('utf-8'),\n headers={\n 'x-amz-target': 'DynamoDB_20120810.{}'.format(action),\n 'Content-Type': 'application/x-amz-json-1.0',\n }), handle_response)\n return future\n",
"def _on_exception(self, error):\n \"\"\"Handle exceptions that can not be retried.\n\n :param error: The exception that was raised\n :type error: sprockets_dynamodb.exceptions.DynamoDBException\n\n \"\"\"\n if not self._on_error:\n raise error\n self._on_error(error)\n",
"def _sleep_duration(attempt):\n \"\"\"Calculates how long to sleep between exceptions. Returns a value\n in seconds.\n\n :param int attempt: The attempt number\n :rtype: float\n\n \"\"\"\n return (float(2 ** attempt) * 100) / 1000\n"
] |
class Client(object):
"""
Asynchronous DynamoDB Client
:keyword str region: AWS region to send requests to
:keyword str access_key: AWS access key. If unspecified, this
defaults to the :envvar:`AWS_ACCESS_KEY_ID` environment
variable and will fall back to using the AWS CLI credentials
file. See :class:`tornado_aws.client.AsyncAWSClient` for
more details.
:keyword str secret_key: AWS secret used to secure API calls.
If unspecified, this defaults to the :envvar:`AWS_SECRET_ACCESS_KEY`
environment variable and will fall back to using the AWS CLI
credentials as described in :class:`tornado_aws.client.AsyncAWSClient`.
:keyword str profile: optional profile to use in AWS API calls.
If unspecified, this defaults to the :envvar:`AWS_DEFAULT_PROFILE`
environment variable or ``default`` if unset.
:keyword str endpoint: DynamoDB endpoint to contact. If unspecified,
the default is determined by the region.
:keyword int max_clients: optional maximum number of HTTP requests
that may be performed in parallel.
:keyword int max_retries: Maximum number of times to retry a request when
if fails under certain conditions. Can also be set with the
:envvar:`DYNAMODB_MAX_RETRIES` environment variable.
:keyword method instrumentation_callback: A method that is invoked with a
list of measurements that were collected during the execution of an
individual action.
:keyword method on_error_callback: A method that is invoked when there is
a request exception that can not automatically be retried or the
maximum number of retries has been exceeded for a request.
Any of the methods invoked in the client can raise the following
exceptions:
- :exc:`sprockets_dynamodb.exceptions.DynamoDBException`
- :exc:`sprockets_dynamodb.exceptions.ConfigNotFound`
- :exc:`sprockets_dynamodb.exceptions.NoCredentialsError`
- :exc:`sprockets_dynamodb.exceptions.NoProfileError`
- :exc:`sprockets_dynamodb.exceptions.TimeoutException`
- :exc:`sprockets_dynamodb.exceptions.RequestException`
- :exc:`sprockets_dynamodb.exceptions.InternalFailure`
- :exc:`sprockets_dynamodb.exceptions.LimitExceeded`
- :exc:`sprockets_dynamodb.exceptions.MissingParameter`
- :exc:`sprockets_dynamodb.exceptions.OptInRequired`
- :exc:`sprockets_dynamodb.exceptions.ResourceInUse`
- :exc:`sprockets_dynamodb.exceptions.RequestExpired`
- :exc:`sprockets_dynamodb.exceptions.ServiceUnavailable`
- :exc:`sprockets_dynamodb.exceptions.ValidationException`
Create an instance of this class to interact with a DynamoDB
server. A :class:`tornado_aws.client.AsyncAWSClient` instance
implements the AWS API wrapping and this class provides the
DynamoDB specifics.
"""
DEFAULT_MAX_RETRIES = 3
def __init__(self, **kwargs):
    """Create the client, wiring up the underlying tornado_aws client.

    See the class docstring for the supported keyword arguments.

    """
    self.logger = LOGGER.getChild(self.__class__.__name__)
    if os.environ.get('DYNAMODB_ENDPOINT', None):
        kwargs.setdefault('endpoint', os.environ['DYNAMODB_ENDPOINT'])
    self._client = tornado_aws.AsyncAWSClient('dynamodb', **kwargs)
    self._ioloop = kwargs.get('io_loop', ioloop.IOLoop.current())
    # int() guards against DYNAMODB_MAX_RETRIES arriving from the
    # environment as a string, which would break the
    # ``range(1, self._max_retries + 1)`` arithmetic in ``execute``.
    self._max_retries = int(kwargs.get(
        'max_retries', os.environ.get(
            'DYNAMODB_MAX_RETRIES', self.DEFAULT_MAX_RETRIES)))
    self._instrumentation_callback = kwargs.get('instrumentation_callback')
    self._on_error = kwargs.get('on_error_callback')
def create_table(self, table_definition):
    """Create a new DynamoDB table via the `CreateTable`_ API call.

    :param dict table_definition: description of the table to
        create according to `CreateTable`_
    :rtype: tornado.concurrent.Future

    .. _CreateTable: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_CreateTable.html

    """
    return self.execute('CreateTable', table_definition)
def update_table(self, table_definition):
    """Modify provisioned throughput, global secondary indexes, or
    DynamoDB Streams settings for an existing table via `UpdateTable`_.

    Only one of the following may be performed per call: modify the
    table's provisioned throughput, enable/disable Streams, remove a
    global secondary index, or create a new global secondary index.
    While the operation executes, the table status changes from
    ``ACTIVE`` to ``UPDATING``; no further *UpdateTable* request may be
    issued until it returns to ``ACTIVE``.

    .. note:: Not yet implemented by this client.

    :param dict table_definition: description of the table to
        update according to `UpdateTable`_
    :rtype: tornado.concurrent.Future

    .. _UpdateTable: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_UpdateTable.html

    """
    raise NotImplementedError
def delete_table(self, table_name):
    """Delete a table and all of its items via `DeleteTable`_.

    After the request, the table is in the ``DELETING`` state until
    DynamoDB completes the deletion.  An ``ACTIVE`` table can be
    deleted; a table in ``CREATING`` or ``UPDATING`` raises
    :exc:`~sprockets_dynamodb.exceptions.ResourceInUse`, a missing
    table raises
    :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`, and a table
    already ``DELETING`` returns without error.

    :param str table_name: name of the table to delete.
    :rtype: tornado.concurrent.Future

    .. _DeleteTable: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_DeleteTable.html

    """
    return self.execute('DeleteTable', {'TableName': table_name})
def describe_table(self, table_name):
    """Fetch a table's description via the `DescribeTable`_ API call.

    :param str table_name: name of the table to describe.
    :rtype: tornado.concurrent.Future

    .. _DescribeTable: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_DescribeTable.html

    """
    return self.execute('DescribeTable', {'TableName': table_name})
def list_tables(self, exclusive_start_table_name=None, limit=None):
    """List table names for the current account/endpoint via
    `ListTables`_.

    Output is paginated, each page returning at most ``100`` names.

    :param str exclusive_start_table_name: first table name this
        operation will evaluate; pass a previous response's
        ``LastEvaluatedTableName`` to fetch the next page.
    :param int limit: maximum number of names to return
        (``100`` when unspecified).

    .. _ListTables: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_ListTables.html

    """
    request = {}
    if exclusive_start_table_name:
        request['ExclusiveStartTableName'] = exclusive_start_table_name
    if limit:
        request['Limit'] = limit
    return self.execute('ListTables', request)
def put_item(self, table_name, item,
             condition_expression=None,
             expression_attribute_names=None,
             expression_attribute_values=None,
             return_consumed_capacity=None,
             return_item_collection_metrics=None,
             return_values=None):
    """Invoke the `PutItem`_ function, creating a new item or completely
    replacing an existing item that has the same primary key.

    A conditional put (add only when the key does not exist, or replace
    only when attributes match) can be expressed through
    *condition_expression*.

    :param str table_name: The table to put the item to
    :param dict item: attribute name/value pairs for the item; all
        primary-key attributes are required and any index-key attributes
        must match the schema's types.  Marshalled for you.
    :param str condition_expression: condition that must be satisfied
        for the put to succeed
    :param dict expression_attribute_names: substitution tokens for
        attribute names in an expression
    :param dict expression_attribute_values: values that can be
        substituted in an expression
    :param str return_consumed_capacity: level of throughput consumption
        detail to return (``INDEXES``, ``TOTAL`` or ``NONE``)
    :param str return_item_collection_metrics: whether item collection
        metrics are returned (``SIZE`` or ``NONE``)
    :param str return_values: return the item attributes as they
        appeared before the update
    :rtype: tornado.concurrent.Future

    .. _PutItem: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_PutItem.html

    """
    payload = {'TableName': table_name, 'Item': utils.marshall(item)}
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        # NOTE(review): unlike update_item/delete_item this does not
        # marshall the values -- confirm whether callers pass
        # pre-marshalled values before changing it.
        payload['ExpressionAttributeValues'] = expression_attribute_values
    if return_consumed_capacity:
        # Validate like the sibling item methods do
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        # Previously this hard-coded 'SIZE', silently ignoring the
        # caller-supplied value; validate and pass it through instead,
        # consistent with update_item and delete_item.
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('PutItem', payload)
def get_item(self, table_name, key_dict,
             consistent_read=False,
             expression_attribute_names=None,
             projection_expression=None,
             return_consumed_capacity=None):
    """Retrieve a single item via the `GetItem`_ function.

    :param str table_name: table to retrieve the item from
    :param dict key_dict: primary key of the item; marshalled for you,
        so a native :class:`dict` works
    :param bool consistent_read: use strongly consistent reads when
        :py:data:`True`, eventually consistent reads otherwise
    :param dict expression_attribute_names: substitution tokens for
        attribute names in an expression
    :param str projection_expression: comma-separated attributes to
        retrieve; all attributes are returned when omitted and missing
        attributes simply do not appear in the result
    :param str return_consumed_capacity: level of throughput consumption
        detail to return (``INDEXES``, ``TOTAL`` or ``NONE``)
    :rtype: tornado.concurrent.Future

    .. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_GetItem.html

    """
    request = {
        'TableName': table_name,
        'Key': utils.marshall(key_dict),
        'ConsistentRead': consistent_read,
    }
    if expression_attribute_names:
        request['ExpressionAttributeNames'] = expression_attribute_names
    if projection_expression:
        request['ProjectionExpression'] = projection_expression
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('GetItem', request)
def update_item(self, table_name, key_dict,
                condition_expression=None,
                update_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=None):
    """Edit an existing item's attributes -- or add a new item when none
    exists -- via the `UpdateItem`_ function.

    Supports conditional updates: insert a new attribute name-value pair
    if it doesn't exist, or replace an existing pair if it has the
    expected values.

    :param str table_name: table containing the item to update
    :param dict key_dict: full primary key of the item (partition key,
        plus sort key for composite keys); marshalled for you
    :param str condition_expression: condition that must be satisfied
        for the update to succeed
    :param str update_expression: attributes to update, the action to
        perform on them, and their new value(s)
    :param dict expression_attribute_names: substitution tokens for
        attribute names in an expression
    :param dict expression_attribute_values: values that can be
        substituted in an expression; marshalled for you
    :param str return_consumed_capacity: level of throughput consumption
        detail to return (``INDEXES``, ``TOTAL`` or ``NONE``)
    :param str return_item_collection_metrics: whether item collection
        metrics are returned
    :param str return_values: return the item attributes as they
        appeared before or after the update
    :rtype: tornado.concurrent.Future

    .. _UpdateItem: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_UpdateItem.html

    """
    request = {
        'TableName': table_name,
        'Key': utils.marshall(key_dict),
        'UpdateExpression': update_expression,
    }
    if condition_expression:
        request['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        request['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        request['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        request['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        request['ReturnValues'] = return_values
    return self.execute('UpdateItem', request)
def delete_item(self, table_name, key_dict,
                condition_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=False):
    """Delete a single item by primary key via the `DeleteItem`_
    function.

    A conditional delete (only when the item exists, or when it has an
    expected attribute value) can be expressed through
    *condition_expression*.

    :param str table_name: table to delete the item from
    :param dict key_dict: full primary key of the item (partition key,
        plus sort key for composite keys); marshalled for you
    :param str condition_expression: condition that must be satisfied
        for the delete to succeed
    :param dict expression_attribute_names: substitution tokens for
        attribute names in an expression
    :param dict expression_attribute_values: values that can be
        substituted in an expression; marshalled for you
    :param str return_consumed_capacity: level of throughput consumption
        detail to return (``INDEXES``, ``TOTAL`` or ``NONE``)
    :param str return_item_collection_metrics: whether item collection
        metrics are returned
    :param str return_values: return the item attributes as they
        appeared before deletion

    .. _DeleteItem: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_DeleteItem.html

    """
    request = {'TableName': table_name, 'Key': utils.marshall(key_dict)}
    if condition_expression:
        request['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        request['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        request['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        request['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        request['ReturnValues'] = return_values
    return self.execute('DeleteItem', request)
def batch_get_item(self):
    """Invoke the `BatchGetItem`_ function.

    .. note:: Not yet implemented by this client.

    .. _BatchGetItem: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_BatchGetItem.html

    """
    raise NotImplementedError
def batch_write_item(self):
    """Invoke the `BatchWriteItem`_ function.

    .. note:: Not yet implemented by this client.

    .. _BatchWriteItem: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_BatchWriteItem.html

    """
    raise NotImplementedError
def query(self, table_name,
          index_name=None,
          consistent_read=None,
          key_condition_expression=None,
          filter_expression=None,
          expression_attribute_names=None,
          expression_attribute_values=None,
          projection_expression=None,
          select=None,
          exclusive_start_key=None,
          limit=None,
          scan_index_forward=True,
          return_consumed_capacity=None):
    """Directly access items from a table or secondary index using the
    `Query`_ operation.

    :param str table_name: table containing the requested items
    :param str index_name: local or global secondary index to query
    :param bool consistent_read: use strongly consistent reads when
        ``True``; not supported on global secondary indexes (querying
        one with ``consistent_read`` set raises
        :exc:`~sprockets_dynamodb.exceptions.ValidationException`)
    :param str key_condition_expression: key condition for the query;
        must perform an equality test on the partition key and may add
        one comparison test on the sort key
    :param str filter_expression: condition applied after the query but
        before results are returned; filtering happens after the items
        are read and consumes no extra read capacity
    :param dict expression_attribute_names: substitution tokens for
        attribute names in an expression
    :param dict expression_attribute_values: values that can be
        substituted in an expression; marshalled for you
    :param str projection_expression: comma-separated attributes to
        return
    :param str select: attributes to return -- ``ALL_ATTRIBUTES``,
        ``ALL_PROJECTED_ATTRIBUTES`` (index queries only) or ``COUNT``
    :param dict exclusive_start_key: primary key of the first item to
        evaluate; use a previous response's ``LastEvaluatedKey``
    :param int limit: maximum number of items to evaluate; when the
        limit (or the 1 MB data-set cap) is reached, matching values up
        to that point are returned with a ``LastEvaluatedKey`` for
        resuming
    :param bool scan_index_forward: traverse the index in ascending
        sort-key order when ``True`` (default), descending when
        ``False``
    :param str return_consumed_capacity: level of throughput consumption
        detail to return (``INDEXES``, ``TOTAL`` or ``NONE``)
    :rtype: dict

    .. _Query: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_Query.html

    """
    request = {
        'TableName': table_name,
        'ScanIndexForward': scan_index_forward,
    }
    if index_name:
        request['IndexName'] = index_name
    # ``is not None`` so an explicit False is still sent to DynamoDB
    if consistent_read is not None:
        request['ConsistentRead'] = consistent_read
    if key_condition_expression:
        request['KeyConditionExpression'] = key_condition_expression
    if filter_expression:
        request['FilterExpression'] = filter_expression
    if expression_attribute_names:
        request['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        request['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if projection_expression:
        request['ProjectionExpression'] = projection_expression
    if select:
        _validate_select(select)
        request['Select'] = select
    if exclusive_start_key:
        request['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if limit:
        request['Limit'] = limit
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Query', request)
def scan(self,
         table_name,
         index_name=None,
         consistent_read=None,
         projection_expression=None,
         filter_expression=None,
         expression_attribute_names=None,
         expression_attribute_values=None,
         segment=None,
         total_segments=None,
         select=None,
         limit=None,
         exclusive_start_key=None,
         return_consumed_capacity=None):
    """The `Scan`_ operation returns one or more items and item attributes
    by accessing every item in a table or a secondary index.

    If the total number of scanned items exceeds the maximum data set size
    limit of 1 MB, the scan stops and results are returned to the user as a
    ``LastEvaluatedKey`` value to continue the scan in a subsequent
    operation. The results also include the number of items exceeding the
    limit. A scan can result in no table data meeting the filter criteria.

    By default, Scan operations proceed sequentially; however, for faster
    performance on a large table or secondary index, applications can
    request a parallel *Scan* operation by providing the ``segment`` and
    ``total_segments`` parameters.

    By default, *Scan* uses eventually consistent reads. If you need a
    consistent copy of the data, as of the time that the *Scan* begins,
    set the ``consistent_read`` parameter to ``True``.

    :param str table_name: The name of the table to scan
    :param str index_name: Optional secondary index to scan
    :param bool consistent_read: Use strongly consistent reads when truthy
    :param str projection_expression: Attributes to retrieve
    :param str filter_expression: Conditions applied after the scan
    :param dict expression_attribute_names: Attribute-name substitutions
    :param dict expression_attribute_values: Attribute-value substitutions,
        marshalled for you
    :param int segment: Zero-indexed segment for a parallel scan
    :param int total_segments: Total segments in a parallel scan
    :param str select: The attributes to be returned in the result
    :param int limit: Maximum number of items to evaluate
    :param dict exclusive_start_key: Key to resume a paginated scan from
    :param str return_consumed_capacity: ``INDEXES``, ``TOTAL`` or ``NONE``
    :rtype: dict

    .. _Scan: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_Scan.html

    """
    payload = {'TableName': table_name}
    if index_name:
        payload['IndexName'] = index_name
    if consistent_read is not None:
        payload['ConsistentRead'] = consistent_read
    if filter_expression:
        payload['FilterExpression'] = filter_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if projection_expression:
        payload['ProjectionExpression'] = projection_expression
    # Segments are zero-indexed, so ``segment=0`` is a valid value for the
    # first worker of a parallel scan; a plain truthiness test silently
    # dropped it and broke parallel scans for that worker.
    if segment is not None:
        payload['Segment'] = segment
    if total_segments:
        payload['TotalSegments'] = total_segments
    if select:
        _validate_select(select)
        payload['Select'] = select
    if exclusive_start_key:
        payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if limit:
        payload['Limit'] = limit
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Scan', payload)
def set_error_callback(self, callback):
    """Assign a method to invoke when a request has encountered an
    unrecoverable error in an action execution.

    This is a plain attribute setter; the previous ``@gen.coroutine``
    decorator made it return a Future for no benefit and was inconsistent
    with :meth:`set_instrumentation_callback`.

    :param method callback: The method to invoke

    """
    self.logger.debug('Setting error callback: %r', callback)
    self._on_error = callback
def set_instrumentation_callback(self, callback):
    """Register the method that receives the list of measurements gathered
    while executing a request.

    :param method callback: The method to invoke

    """
    self.logger.debug('Setting instrumentation callback: %r', callback)
    self._instrumentation_callback = callback
def _execute(self, action, parameters, attempt, measurements):
    """Invoke a DynamoDB action
    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :param int attempt: Which attempt number this is
    :param list measurements: A list for accumulating request measurements
    :rtype: tornado.concurrent.Future
    """
    # The returned future is resolved (or failed) by _on_response once the
    # HTTP round trip completes; callers yield/await it.
    future = concurrent.Future()
    # Timestamp captured before the fetch so the eventual Measurement
    # covers the full round trip, not just response handling.
    start = time.time()
    def handle_response(request):
        """Invoked by the IOLoop when fetch has a response to process.
        :param tornado.concurrent.Future request: The request future
        """
        # Closure captures action/parameters/attempt/start so the response
        # handler can attribute the result to this specific attempt.
        self._on_response(
            action, parameters.get('TableName', 'Unknown'), attempt,
            start, request, future, measurements)
    # DynamoDB's HTTP API routes every action through POST / with the
    # action named in the x-amz-target header.
    ioloop.IOLoop.current().add_future(self._client.fetch(
        'POST', '/',
        body=json.dumps(parameters).encode('utf-8'),
        headers={
            'x-amz-target': 'DynamoDB_20120810.{}'.format(action),
            'Content-Type': 'application/x-amz-json-1.0',
        }), handle_response)
    return future
def _on_exception(self, error):
"""Handle exceptions that can not be retried.
:param error: The exception that was raised
:type error: sprockets_dynamodb.exceptions.DynamoDBException
"""
if not self._on_error:
raise error
self._on_error(error)
def _on_response(self, action, table, attempt, start, response, future,
                 measurements):
    """Invoked when the HTTP request to the DynamoDB has returned and
    is responsible for setting the future result or exception based upon
    the HTTP response provided.
    :param str action: The action that was taken
    :param str table: The table name the action was made against
    :param int attempt: The attempt number for the action
    :param float start: When the request was submitted
    :param tornado.concurrent.Future response: The HTTP request future
    :param tornado.concurrent.Future future: The action execution future
    :param list measurements: The measurement accumulator
    """
    self.logger.debug('%s on %s request #%i = %r',
                      action, table, attempt, response)
    now, exception = time.time(), None
    # Clause ordering matters: specific tornado_aws exceptions must be
    # caught before their AWSError base, and TimeoutError before the
    # broader HTTPError / Exception handlers.
    try:
        future.set_result(self._process_response(response))
    except aws_exceptions.ConfigNotFound as error:
        exception = exceptions.ConfigNotFound(str(error))
    except aws_exceptions.ConfigParserError as error:
        exception = exceptions.ConfigParserError(str(error))
    except aws_exceptions.NoCredentialsError as error:
        exception = exceptions.NoCredentialsError(str(error))
    except aws_exceptions.NoProfileError as error:
        exception = exceptions.NoProfileError(str(error))
    except aws_exceptions.AWSError as error:
        exception = exceptions.DynamoDBException(error)
    # Network / transport level failures are normalized into a single
    # RequestException so callers have one retryable error type.
    except (ConnectionError, ConnectionResetError, OSError,
            aws_exceptions.RequestException, ssl.SSLError,
            _select.error, ssl.socket_error, socket.gaierror) as error:
        exception = exceptions.RequestException(str(error))
    except TimeoutError:
        exception = exceptions.TimeoutException()
    except httpclient.HTTPError as error:
        # Tornado reports client-side timeouts as HTTP 599.
        if error.code == 599:
            exception = exceptions.TimeoutException()
        else:
            exception = exceptions.RequestException(
                getattr(getattr(error, 'response', error),
                        'body', str(error.code)))
    except Exception as error:
        exception = error
    if exception:
        future.set_exception(exception)
    # max(now, start) presumably guards against clock adjustments making
    # the recorded duration negative -- TODO confirm. The last field is
    # the exception class name, or None when the request succeeded.
    measurements.append(
        Measurement(now, action, table, attempt, max(now, start) - start,
                    exception.__class__.__name__
                    if exception else exception))
@staticmethod
def _process_response(response):
"""Process the raw AWS response, returning either the mapped exception
or deserialized response.
:param tornado.concurrent.Future response: The request future
:rtype: dict or list
:raises: sprockets_dynamodb.exceptions.DynamoDBException
"""
error = response.exception()
if error:
if isinstance(error, aws_exceptions.AWSError):
if error.args[1]['type'] in exceptions.MAP:
raise exceptions.MAP[error.args[1]['type']](
error.args[1]['message'])
raise error
http_response = response.result()
if not http_response or not http_response.body:
raise exceptions.DynamoDBException('empty response')
return json.loads(http_response.body.decode('utf-8'))
@staticmethod
def _sleep_duration(attempt):
"""Calculates how long to sleep between exceptions. Returns a value
in seconds.
:param int attempt: The attempt number
:rtype: float
"""
return (float(2 ** attempt) * 100) / 1000
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/client.py
|
Client.set_error_callback
|
python
|
def set_error_callback(self, callback):
self.logger.debug('Setting error callback: %r', callback)
self._on_error = callback
|
Assign a method to invoke when a request has encountered an
unrecoverable error in an action execution.
:param method callback: The method to invoke
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/client.py#L792-L800
| null |
class Client(object):
"""
Asynchronous DynamoDB Client
:keyword str region: AWS region to send requests to
:keyword str access_key: AWS access key. If unspecified, this
defaults to the :envvar:`AWS_ACCESS_KEY_ID` environment
variable and will fall back to using the AWS CLI credentials
file. See :class:`tornado_aws.client.AsyncAWSClient` for
more details.
:keyword str secret_key: AWS secret used to secure API calls.
If unspecified, this defaults to the :envvar:`AWS_SECRET_ACCESS_KEY`
environment variable and will fall back to using the AWS CLI
credentials as described in :class:`tornado_aws.client.AsyncAWSClient`.
:keyword str profile: optional profile to use in AWS API calls.
If unspecified, this defaults to the :envvar:`AWS_DEFAULT_PROFILE`
environment variable or ``default`` if unset.
:keyword str endpoint: DynamoDB endpoint to contact. If unspecified,
the default is determined by the region.
:keyword int max_clients: optional maximum number of HTTP requests
that may be performed in parallel.
:keyword int max_retries: Maximum number of times to retry a request when
if fails under certain conditions. Can also be set with the
:envvar:`DYNAMODB_MAX_RETRIES` environment variable.
:keyword method instrumentation_callback: A method that is invoked with a
list of measurements that were collected during the execution of an
individual action.
:keyword method on_error_callback: A method that is invoked when there is
a request exception that can not automatically be retried or the
maximum number of retries has been exceeded for a request.
Any of the methods invoked in the client can raise the following
exceptions:
- :exc:`sprockets_dynamodb.exceptions.DynamoDBException`
- :exc:`sprockets_dynamodb.exceptions.ConfigNotFound`
- :exc:`sprockets_dynamodb.exceptions.NoCredentialsError`
- :exc:`sprockets_dynamodb.exceptions.NoProfileError`
- :exc:`sprockets_dynamodb.exceptions.TimeoutException`
- :exc:`sprockets_dynamodb.exceptions.RequestException`
- :exc:`sprockets_dynamodb.exceptions.InternalFailure`
- :exc:`sprockets_dynamodb.exceptions.LimitExceeded`
- :exc:`sprockets_dynamodb.exceptions.MissingParameter`
- :exc:`sprockets_dynamodb.exceptions.OptInRequired`
- :exc:`sprockets_dynamodb.exceptions.ResourceInUse`
- :exc:`sprockets_dynamodb.exceptions.RequestExpired`
- :exc:`sprockets_dynamodb.exceptions.ServiceUnavailable`
- :exc:`sprockets_dynamodb.exceptions.ValidationException`
Create an instance of this class to interact with a DynamoDB
server. A :class:`tornado_aws.client.AsyncAWSClient` instance
implements the AWS API wrapping and this class provides the
DynamoDB specifics.
"""
DEFAULT_MAX_RETRIES = 3
def __init__(self, **kwargs):
    """Create a new client instance.

    See the class docstring for the supported keyword arguments. The
    ``DYNAMODB_ENDPOINT`` and ``DYNAMODB_MAX_RETRIES`` environment
    variables provide defaults for ``endpoint`` and ``max_retries``.

    """
    self.logger = LOGGER.getChild(self.__class__.__name__)
    if os.environ.get('DYNAMODB_ENDPOINT', None):
        kwargs.setdefault('endpoint', os.environ['DYNAMODB_ENDPOINT'])
    self._client = tornado_aws.AsyncAWSClient('dynamodb', **kwargs)
    self._ioloop = kwargs.get('io_loop', ioloop.IOLoop.current())
    # Environment variables are strings; coerce to int so comparisons
    # against the integer attempt counter do not raise TypeError when
    # DYNAMODB_MAX_RETRIES is set.
    self._max_retries = int(kwargs.get(
        'max_retries', os.environ.get(
            'DYNAMODB_MAX_RETRIES', self.DEFAULT_MAX_RETRIES)))
    self._instrumentation_callback = kwargs.get('instrumentation_callback')
    self._on_error = kwargs.get('on_error_callback')
def create_table(self, table_definition):
    """Invoke the `CreateTable`_ function.

    :param dict table_definition: description of the table to
        create according to `CreateTable`_
    :rtype: tornado.concurrent.Future

    .. _CreateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_CreateTable.html

    """
    action = 'CreateTable'
    return self.execute(action, table_definition)
def update_table(self, table_definition):
    """Modify the provisioned throughput settings, global secondary
    indexes, or DynamoDB Streams settings for a given table via the
    `UpdateTable`_ API.

    Only one of the following may be performed per call: changing the
    provisioned throughput, enabling/disabling Streams, or removing or
    creating a single global secondary index. While the call executes the
    table status changes from ``ACTIVE`` to ``UPDATING``; no further
    *UpdateTable* request can be issued until it returns to ``ACTIVE``.

    :param dict table_definition: description of the table to
        update according to `UpdateTable`_
    :rtype: tornado.concurrent.Future
    :raises NotImplementedError: always; this API is not yet supported

    .. _UpdateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateTable.html

    """
    raise NotImplementedError
def delete_table(self, table_name):
    """Invoke the `DeleteTable`_ function, removing the table and all of
    its items.

    After the request the table stays in the ``DELETING`` state until
    DynamoDB finishes. Deleting an ``ACTIVE`` table succeeds; a table in
    ``CREATING`` or ``UPDATING`` raises
    :exc:`~sprockets_dynamodb.exceptions.ResourceInUse`, and a missing
    table raises
    :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`. A table
    already in ``DELETING`` returns no error.

    :param str table_name: name of the table to delete.
    :rtype: tornado.concurrent.Future

    .. _DeleteTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteTable.html

    """
    payload = {'TableName': table_name}
    return self.execute('DeleteTable', payload)
def describe_table(self, table_name):
    """Invoke the `DescribeTable`_ function.

    :param str table_name: name of the table to describe.
    :rtype: tornado.concurrent.Future

    .. _DescribeTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DescribeTable.html

    """
    payload = {'TableName': table_name}
    return self.execute('DescribeTable', payload)
def list_tables(self, exclusive_start_table_name=None, limit=None):
    """Invoke the `ListTables`_ function.

    Returns an array of table names associated with the current account
    and endpoint. The output is paginated, with each page returning a
    maximum of ``100`` table names.

    :param str exclusive_start_table_name: The first table name that this
        operation will evaluate. Use the value that was returned for
        ``LastEvaluatedTableName`` in a previous operation to obtain the
        next page of results.
    :param int limit: A maximum number of table names to return; defaults
        to ``100`` when unspecified.

    .. _ListTables: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_ListTables.html

    """
    optional = (('ExclusiveStartTableName', exclusive_start_table_name),
                ('Limit', limit))
    payload = {name: value for name, value in optional if value}
    return self.execute('ListTables', payload)
def put_item(self, table_name, item,
             condition_expression=None,
             expression_attribute_names=None,
             expression_attribute_values=None,
             return_consumed_capacity=None,
             return_item_collection_metrics=None,
             return_values=None):
    """Invoke the `PutItem`_ function, creating a new item or completely
    replacing an existing item that has the same primary key.

    You can perform a conditional put (add a new item only if one with the
    specified primary key doesn't exist), or replace an existing item if
    it has certain attribute values.

    :param str table_name: The table to put the item to
    :param dict item: A map of attribute name/value pairs. All primary key
        attributes are required; the map is marshalled into the DynamoDB
        wire format for you.
    :param str condition_expression: A condition that must be satisfied in
        order for a conditional *PutItem* operation to succeed.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param dict expression_attribute_values: One or more values that can
        be substituted in an expression; marshalled for you.
    :param str return_consumed_capacity: ``INDEXES``, ``TOTAL`` or
        ``NONE``.
    :param str return_item_collection_metrics: ``SIZE`` or ``NONE``.
    :param str return_values: Use if you want the item attributes as they
        appeared before they were updated by this request.
    :rtype: tornado.concurrent.Future

    .. _PutItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_PutItem.html

    """
    payload = {'TableName': table_name, 'Item': utils.marshall(item)}
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        # Marshall native values, consistent with update_item/delete_item.
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        # Honor the caller's value; this was previously hard-coded to
        # 'SIZE' regardless of what was passed in.
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('PutItem', payload)
def get_item(self, table_name, key_dict,
             consistent_read=False,
             expression_attribute_names=None,
             projection_expression=None,
             return_consumed_capacity=None):
    """Invoke the `GetItem`_ function.

    :param str table_name: table to retrieve the item from
    :param dict key_dict: key to use for retrieval; marshalled for you so
        a native :class:`dict` works.
    :param bool consistent_read: Use strongly consistent reads when
        :data:`True`; otherwise eventually consistent reads are used.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param str projection_expression: Comma-separated attributes to
        retrieve (scalars, sets, or elements of a JSON document). When
        omitted, all attributes are returned; requested attributes that
        are absent simply do not appear in the result.
    :param str return_consumed_capacity: ``INDEXES``, ``TOTAL`` or
        ``NONE``.
    :rtype: tornado.concurrent.Future

    .. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_GetItem.html

    """
    payload = {'TableName': table_name,
               'Key': utils.marshall(key_dict),
               'ConsistentRead': consistent_read}
    for name, value in (('ExpressionAttributeNames',
                         expression_attribute_names),
                        ('ProjectionExpression', projection_expression)):
        if value:
            payload[name] = value
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('GetItem', payload)
def update_item(self, table_name, key_dict,
                condition_expression=None,
                update_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=None):
    """Invoke the `UpdateItem`_ function.

    Edits an existing item's attributes, or adds a new item to the table
    if it does not already exist. You can put, delete, or add attribute
    values, and perform conditional updates on an existing item.

    :param str table_name: The name of the table that contains the item to
        update
    :param dict key_dict: Key/value pairs defining the full primary key of
        the item (partition key, plus sort key for composite keys);
        marshalled for you.
    :param str condition_expression: A condition that must be satisfied in
        order for a conditional *UpdateItem* operation to succeed.
    :param str update_expression: Defines one or more attributes to be
        updated, the action to be performed on them, and their new
        value(s).
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param dict expression_attribute_values: One or more values that can
        be substituted in an expression; marshalled for you.
    :param str return_consumed_capacity: ``INDEXES``, ``TOTAL`` or
        ``NONE``.
    :param str return_item_collection_metrics: ``SIZE`` or ``NONE``.
    :param str return_values: Use if you want the item attributes as they
        appeared either before or after the update.
    :rtype: tornado.concurrent.Future

    .. _UpdateItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateItem.html

    """
    payload = {'TableName': table_name, 'Key': utils.marshall(key_dict)}
    if update_expression:
        # Only include when provided; unconditionally sending the key
        # serialized ``null`` into the request when the caller omitted it.
        payload['UpdateExpression'] = update_expression
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('UpdateItem', payload)
def delete_item(self, table_name, key_dict,
                condition_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=None):
    """Invoke the `DeleteItem`_ function, deleting a single item in a
    table by primary key.

    You can perform a conditional delete operation that deletes the item
    if it exists, or if it has an expected attribute value.

    :param str table_name: The name of the table from which to delete the
        item.
    :param dict key_dict: The full primary key of the item to delete
        (partition key, plus sort key for composite keys); marshalled for
        you.
    :param str condition_expression: A condition that must be satisfied in
        order for a conditional *DeleteItem* to succeed.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param dict expression_attribute_values: One or more values that can
        be substituted in an expression; marshalled for you.
    :param str return_consumed_capacity: ``INDEXES``, ``TOTAL`` or
        ``NONE``.
    :param str return_item_collection_metrics: ``SIZE`` or ``NONE``.
    :param str return_values: Return the item attributes as they appeared
        before they were deleted. Defaults to ``None`` (previously
        ``False``; both are falsy, so callers are unaffected) for
        consistency with the other item methods.

    .. _DeleteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteItem.html

    """
    payload = {'TableName': table_name, 'Key': utils.marshall(key_dict)}
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('DeleteItem', payload)
def batch_get_item(self):
    """Invoke the `BatchGetItem`_ function.

    .. note:: Not yet implemented.

    .. _BatchGetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchGetItem.html

    """
    raise NotImplementedError
def batch_write_item(self):
    """Invoke the `BatchWriteItem`_ function.

    .. note:: Not yet implemented.

    .. _BatchWriteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchWriteItem.html

    """
    raise NotImplementedError
def query(self, table_name,
          index_name=None,
          consistent_read=None,
          key_condition_expression=None,
          filter_expression=None,
          expression_attribute_names=None,
          expression_attribute_values=None,
          projection_expression=None,
          select=None,
          exclusive_start_key=None,
          limit=None,
          scan_index_forward=True,
          return_consumed_capacity=None):
    """A `Query`_ operation uses the primary key of a table or a secondary
    index to directly access items from that table or index.

    :param str table_name: The name of the table containing the requested
        items.
    :param str index_name: The name of a local or global secondary index
        to query.
    :param bool consistent_read: Use strongly consistent reads when
        ``True``. Not supported on global secondary indexes; querying one
        with ``consistent_read=True`` raises a
        :exc:`~sprockets_dynamodb.exceptions.ValidationException`.
    :param str key_condition_expression: The condition specifying the key
        value(s) for items to retrieve. Must perform an equality test on a
        single partition key value and may optionally add one comparison
        on a single sort key value.
    :param str filter_expression: Conditions DynamoDB applies after the
        query but before returning data; filtered items consume no
        additional read capacity.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param dict expression_attribute_values: One or more values that can
        be substituted in an expression; marshalled for you.
    :param str projection_expression: Attributes to retrieve.
    :param str select: ``ALL_ATTRIBUTES``, ``ALL_PROJECTED_ATTRIBUTES`` or
        ``COUNT``.
    :param dict exclusive_start_key: The primary key of the first item to
        evaluate; use the previous operation's ``LastEvaluatedKey``.
    :param int limit: The maximum number of items to evaluate (not
        necessarily the number of matching items). A ``LastEvaluatedKey``
        is returned to continue a truncated operation.
    :param bool scan_index_forward: Traverse the index in ascending sort
        key order when ``True`` (default), descending when ``False``.
    :param str return_consumed_capacity: ``INDEXES``, ``TOTAL`` or
        ``NONE``.
    :rtype: dict

    .. _Query: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_Query.html

    """
    payload = {'TableName': table_name,
               'ScanIndexForward': scan_index_forward}
    # Simple pass-through parameters, included only when truthy.
    pass_through = (('IndexName', index_name),
                    ('KeyConditionExpression', key_condition_expression),
                    ('FilterExpression', filter_expression),
                    ('ExpressionAttributeNames',
                     expression_attribute_names),
                    ('ProjectionExpression', projection_expression),
                    ('Limit', limit))
    for name, value in pass_through:
        if value:
            payload[name] = value
    # ``False`` is a meaningful value for consistent_read, so test None.
    if consistent_read is not None:
        payload['ConsistentRead'] = consistent_read
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if select:
        _validate_select(select)
        payload['Select'] = select
    if exclusive_start_key:
        payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Query', payload)
def scan(self,
         table_name,
         index_name=None,
         consistent_read=None,
         projection_expression=None,
         filter_expression=None,
         expression_attribute_names=None,
         expression_attribute_values=None,
         segment=None,
         total_segments=None,
         select=None,
         limit=None,
         exclusive_start_key=None,
         return_consumed_capacity=None):
    """The `Scan`_ operation returns one or more items and item attributes
    by accessing every item in a table or a secondary index.

    If the total number of scanned items exceeds the maximum data set size
    limit of 1 MB, the scan stops and results are returned to the user
    with a ``LastEvaluatedKey`` value to continue the scan in a subsequent
    operation. A scan can result in no table data meeting the filter
    criteria.

    By default, Scan operations proceed sequentially; for faster
    performance on a large table or secondary index, applications can
    request a parallel *Scan* by providing the ``segment`` and
    ``total_segments`` parameters. See `Parallel Scan
    <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/
    QueryAndScan.html#QueryAndScanParallelScan>`_ in the Amazon DynamoDB
    Developer Guide.

    By default, *Scan* uses eventually consistent reads. If you need a
    consistent copy of the data as of the time the *Scan* begins, set
    ``consistent_read`` to ``True``.

    :rtype: dict

    .. _Scan: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_Scan.html

    """
    payload = {'TableName': table_name}
    if index_name:
        payload['IndexName'] = index_name
    if consistent_read is not None:
        payload['ConsistentRead'] = consistent_read
    if filter_expression:
        payload['FilterExpression'] = filter_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if projection_expression:
        payload['ProjectionExpression'] = projection_expression
    # Parallel-scan segments are zero-indexed, so ``segment=0`` is a
    # legitimate value -- a plain truthiness test (``if segment:``)
    # would silently drop the first segment of a parallel scan.
    if segment is not None:
        payload['Segment'] = segment
    if total_segments:
        payload['TotalSegments'] = total_segments
    if select:
        _validate_select(select)
        payload['Select'] = select
    if exclusive_start_key:
        payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if limit:
        payload['Limit'] = limit
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Scan', payload)
@gen.coroutine
def execute(self, action, parameters):
    """
    Execute a DynamoDB action with the given parameters. The method will
    retry requests that failed due to OS level errors or when being
    throttled by DynamoDB.

    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :rtype: tornado.concurrent.Future

    This method creates a future that will resolve to the result
    of calling the specified DynamoDB function. It does its best
    to unwrap the response from the function to make life a little
    easier for you. It does this for the ``GetItem`` and ``Query``
    functions currently.

    :raises: any :exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
        subclass -- retryable errors (throttling, throughput exceeded,
        service unavailable, transport failures, internal server errors)
        are retried up to ``max_retries`` times with exponential backoff
        before being dispatched; all other errors are dispatched
        immediately via :meth:`_on_exception`.

    """
    # Keep at most max_retries measurement records; one per attempt.
    measurements = collections.deque([], self._max_retries)
    for attempt in range(1, self._max_retries + 1):
        try:
            result = yield self._execute(
                action, parameters, attempt, measurements)
        except (exceptions.InternalServerError,
                exceptions.RequestException,
                exceptions.ThrottlingException,
                exceptions.ThroughputExceeded,
                exceptions.ServiceUnavailable) as error:
            if attempt == self._max_retries:
                if self._instrumentation_callback:
                    self._instrumentation_callback(measurements)
                self._on_exception(error)
                # _on_exception either raised or handed the error to the
                # on_error callback. Either way no attempts remain, so
                # return instead of sleeping one more pointless backoff
                # interval (the coroutine resolves to None, as before).
                return
            duration = self._sleep_duration(attempt)
            self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
                                error, attempt, duration)
            yield gen.sleep(duration)
        except exceptions.DynamoDBException as error:
            # Non-retryable DynamoDB error: report measurements and
            # dispatch immediately.
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self._on_exception(error)
        else:
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self.logger.debug('%s result: %r', action, result)
            raise gen.Return(_unwrap_result(action, result))
def set_instrumentation_callback(self, callback):
    """Register *callback* to be invoked with the list of measurements
    gathered while executing each request.

    :param method callback: The method to invoke

    """
    self.logger.debug('Setting instrumentation callback: %r', callback)
    self._instrumentation_callback = callback
def _execute(self, action, parameters, attempt, measurements):
    """Submit a single DynamoDB HTTP request, resolving the returned
    future once the response has been processed.

    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :param int attempt: Which attempt number this is
    :param list measurements: A list for accumulating request measurements
    :rtype: tornado.concurrent.Future

    """
    result_future = concurrent.Future()
    started_at = time.time()

    def _relay(request):
        # Invoked by the IOLoop once the HTTP fetch future resolves;
        # translates the raw HTTP outcome onto ``result_future``.
        self._on_response(
            action, parameters.get('TableName', 'Unknown'), attempt,
            started_at, request, result_future, measurements)

    headers = {
        'x-amz-target': 'DynamoDB_20120810.{}'.format(action),
        'Content-Type': 'application/x-amz-json-1.0',
    }
    fetch_future = self._client.fetch(
        'POST', '/',
        body=json.dumps(parameters).encode('utf-8'),
        headers=headers)
    ioloop.IOLoop.current().add_future(fetch_future, _relay)
    return result_future
def _on_exception(self, error):
    """Dispatch an error that can not be retried.

    Hands the exception to the configured ``on_error_callback`` when
    one was supplied; otherwise the exception is raised to the caller.

    :param error: The exception that was raised
    :type error: sprockets_dynamodb.exceptions.DynamoDBException

    """
    if self._on_error:
        self._on_error(error)
    else:
        raise error
def _on_response(self, action, table, attempt, start, response, future,
                 measurements):
    """Invoked when the HTTP request to the DynamoDB has returned and
    is responsible for setting the future result or exception based upon
    the HTTP response provided.

    :param str action: The action that was taken
    :param str table: The table name the action was made against
    :param int attempt: The attempt number for the action
    :param float start: When the request was submitted
    :param tornado.concurrent.Future response: The HTTP request future
    :param tornado.concurrent.Future future: The action execution future
    :param list measurements: The measurement accumulator

    """
    self.logger.debug('%s on %s request #%i = %r',
                      action, table, attempt, response)
    now, exception = time.time(), None
    try:
        # Success path: decode the response and resolve the caller's
        # future with the deserialized payload.
        future.set_result(self._process_response(response))
    # Map low-level tornado_aws / transport errors onto this package's
    # exception hierarchy. Order matters: the specific AWS config and
    # credential errors must be tested before the generic AWSError
    # base class below.
    except aws_exceptions.ConfigNotFound as error:
        exception = exceptions.ConfigNotFound(str(error))
    except aws_exceptions.ConfigParserError as error:
        exception = exceptions.ConfigParserError(str(error))
    except aws_exceptions.NoCredentialsError as error:
        exception = exceptions.NoCredentialsError(str(error))
    except aws_exceptions.NoProfileError as error:
        exception = exceptions.NoProfileError(str(error))
    except aws_exceptions.AWSError as error:
        exception = exceptions.DynamoDBException(error)
    # Socket / SSL / OS-level transport failures become a retryable
    # RequestException.
    except (ConnectionError, ConnectionResetError, OSError,
            aws_exceptions.RequestException, ssl.SSLError,
            _select.error, ssl.socket_error, socket.gaierror) as error:
        exception = exceptions.RequestException(str(error))
    except TimeoutError:
        exception = exceptions.TimeoutException()
    except httpclient.HTTPError as error:
        # Tornado uses HTTP 599 to signal a client-side timeout.
        if error.code == 599:
            exception = exceptions.TimeoutException()
        else:
            exception = exceptions.RequestException(
                getattr(getattr(error, 'response', error),
                        'body', str(error.code)))
    except Exception as error:
        # Anything unrecognized is passed through unchanged.
        exception = error
    if exception:
        future.set_exception(exception)
    # ``max(now, start) - start`` guards against a negative duration if
    # the clock moved backwards between submission and completion.
    measurements.append(
        Measurement(now, action, table, attempt, max(now, start) - start,
                    exception.__class__.__name__
                    if exception else exception))
@staticmethod
def _process_response(response):
    """Process the raw AWS response, returning either the mapped
    exception or the deserialized response body.

    :param tornado.concurrent.Future response: The request future
    :rtype: dict or list
    :raises: sprockets_dynamodb.exceptions.DynamoDBException

    """
    error = response.exception()
    if error:
        if isinstance(error, aws_exceptions.AWSError):
            # AWSError packs a {'type': ..., 'message': ...} dict as
            # its second argument; translate known types onto this
            # package's exception classes.
            details = error.args[1]
            mapped = exceptions.MAP.get(details['type'])
            if mapped is not None:
                raise mapped(details['message'])
        raise error
    http_response = response.result()
    if not (http_response and http_response.body):
        raise exceptions.DynamoDBException('empty response')
    return json.loads(http_response.body.decode('utf-8'))
@staticmethod
def _sleep_duration(attempt):
    """Exponential backoff: 100ms doubled for each attempt, expressed
    in seconds (0.2s, 0.4s, 0.8s, ...).

    :param int attempt: The attempt number
    :rtype: float

    """
    backoff_ms = float(2 ** attempt) * 100
    return backoff_ms / 1000
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/client.py
|
Client.set_instrumentation_callback
|
python
|
def set_instrumentation_callback(self, callback):
self.logger.debug('Setting instrumentation callback: %r', callback)
self._instrumentation_callback = callback
|
Assign a method to invoke when a request has completed gathering
measurements.
:param method callback: The method to invoke
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/client.py#L802-L810
| null |
class Client(object):
"""
Asynchronous DynamoDB Client
:keyword str region: AWS region to send requests to
:keyword str access_key: AWS access key. If unspecified, this
defaults to the :envvar:`AWS_ACCESS_KEY_ID` environment
variable and will fall back to using the AWS CLI credentials
file. See :class:`tornado_aws.client.AsyncAWSClient` for
more details.
:keyword str secret_key: AWS secret used to secure API calls.
If unspecified, this defaults to the :envvar:`AWS_SECRET_ACCESS_KEY`
environment variable and will fall back to using the AWS CLI
credentials as described in :class:`tornado_aws.client.AsyncAWSClient`.
:keyword str profile: optional profile to use in AWS API calls.
If unspecified, this defaults to the :envvar:`AWS_DEFAULT_PROFILE`
environment variable or ``default`` if unset.
:keyword str endpoint: DynamoDB endpoint to contact. If unspecified,
the default is determined by the region.
:keyword int max_clients: optional maximum number of HTTP requests
that may be performed in parallel.
:keyword int max_retries: Maximum number of times to retry a request when
if fails under certain conditions. Can also be set with the
:envvar:`DYNAMODB_MAX_RETRIES` environment variable.
:keyword method instrumentation_callback: A method that is invoked with a
list of measurements that were collected during the execution of an
individual action.
:keyword method on_error_callback: A method that is invoked when there is
a request exception that can not automatically be retried or the
maximum number of retries has been exceeded for a request.
Any of the methods invoked in the client can raise the following
exceptions:
- :exc:`sprockets_dynamodb.exceptions.DynamoDBException`
- :exc:`sprockets_dynamodb.exceptions.ConfigNotFound`
- :exc:`sprockets_dynamodb.exceptions.NoCredentialsError`
- :exc:`sprockets_dynamodb.exceptions.NoProfileError`
- :exc:`sprockets_dynamodb.exceptions.TimeoutException`
- :exc:`sprockets_dynamodb.exceptions.RequestException`
- :exc:`sprockets_dynamodb.exceptions.InternalFailure`
- :exc:`sprockets_dynamodb.exceptions.LimitExceeded`
- :exc:`sprockets_dynamodb.exceptions.MissingParameter`
- :exc:`sprockets_dynamodb.exceptions.OptInRequired`
- :exc:`sprockets_dynamodb.exceptions.ResourceInUse`
- :exc:`sprockets_dynamodb.exceptions.RequestExpired`
- :exc:`sprockets_dynamodb.exceptions.ServiceUnavailable`
- :exc:`sprockets_dynamodb.exceptions.ValidationException`
Create an instance of this class to interact with a DynamoDB
server. A :class:`tornado_aws.client.AsyncAWSClient` instance
implements the AWS API wrapping and this class provides the
DynamoDB specifics.
"""
DEFAULT_MAX_RETRIES = 3
def __init__(self, **kwargs):
    """Create a client, wiring up the underlying tornado_aws client.

    See the class docstring for the supported keyword arguments.

    """
    self.logger = LOGGER.getChild(self.__class__.__name__)
    if os.environ.get('DYNAMODB_ENDPOINT', None):
        kwargs.setdefault('endpoint', os.environ['DYNAMODB_ENDPOINT'])
    self._client = tornado_aws.AsyncAWSClient('dynamodb', **kwargs)
    self._ioloop = kwargs.get('io_loop', ioloop.IOLoop.current())
    # Environment variable values are always strings; coerce to int so
    # that ``range(1, self._max_retries + 1)`` in execute() does not
    # raise TypeError when DYNAMODB_MAX_RETRIES is set.
    self._max_retries = int(kwargs.get(
        'max_retries', os.environ.get(
            'DYNAMODB_MAX_RETRIES', self.DEFAULT_MAX_RETRIES)))
    self._instrumentation_callback = kwargs.get('instrumentation_callback')
    self._on_error = kwargs.get('on_error_callback')
def create_table(self, table_definition):
    """
    Invoke the ``CreateTable`` function.

    :param dict table_definition: description of the table to
        create according to `CreateTable`_
    :rtype: tornado.concurrent.Future

    .. _CreateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_CreateTable.html

    """
    # The table definition is already in the wire format CreateTable
    # expects, so it is passed through as the request payload.
    payload = table_definition
    return self.execute('CreateTable', payload)
def update_table(self, table_definition):
    """
    Modify the provisioned throughput settings, global secondary
    indexes, or DynamoDB Streams settings for a given table.

    Only one of the following operations may be performed at once:
    modifying provisioned throughput, enabling or disabling Streams,
    removing a global secondary index, or creating a new global
    secondary index.

    *UpdateTable* is an asynchronous operation; while it is executing
    the table status changes from ``ACTIVE`` to ``UPDATING`` and no
    further *UpdateTable* request may be issued until it returns to
    the ``ACTIVE`` state.

    :param dict table_definition: description of the table to
        update according to `UpdateTable`_
    :rtype: tornado.concurrent.Future

    .. _UpdateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateTable.html

    """
    # Not yet implemented by this client.
    raise NotImplementedError
def delete_table(self, table_name):
    """
    Invoke the `DeleteTable`_ function, removing the table and all of
    its items. After the request the table stays in the DELETING state
    until DynamoDB completes the deletion.

    A table in the ACTIVE state can be deleted; a table in CREATING or
    UPDATING state raises
    :py:exc:`~sprockets_dynamodb.exceptions.ResourceInUse`, and a
    nonexistent table raises
    :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`. A table
    already in the DELETING state produces no error.

    :param str table_name: name of the table to delete.
    :rtype: tornado.concurrent.Future

    .. _DeleteTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteTable.html

    """
    payload = {'TableName': table_name}
    return self.execute('DeleteTable', payload)
def describe_table(self, table_name):
    """
    Invoke the `DescribeTable`_ function.

    :param str table_name: name of the table to describe.
    :rtype: tornado.concurrent.Future

    .. _DescribeTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DescribeTable.html

    """
    payload = {'TableName': table_name}
    return self.execute('DescribeTable', payload)
def list_tables(self, exclusive_start_table_name=None, limit=None):
    """
    Invoke the `ListTables`_ function.

    Returns an array of table names associated with the current account
    and endpoint. Output is paginated, each page returning at most
    ``100`` table names.

    :param str exclusive_start_table_name: The first table name that this
        operation will evaluate. Use the value that was returned for
        ``LastEvaluatedTableName`` in a previous operation, so that you
        can obtain the next page of results.
    :param int limit: A maximum number of table names to return. Defaults
        to ``100`` when unspecified.

    .. _ListTables: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_ListTables.html

    """
    # Include only the optional parameters that were actually supplied.
    optional = (('ExclusiveStartTableName', exclusive_start_table_name),
                ('Limit', limit))
    payload = {name: value for name, value in optional if value}
    return self.execute('ListTables', payload)
def put_item(self, table_name, item,
             condition_expression=None,
             expression_attribute_names=None,
             expression_attribute_values=None,
             return_consumed_capacity=None,
             return_item_collection_metrics=None,
             return_values=None):
    """Invoke the `PutItem`_ function, creating a new item or replacing
    an old item with a new item. If an item with the same primary key
    already exists in the specified table, the new item completely
    replaces the existing item. A conditional put (add only if absent,
    or replace only on matching attribute values) can be performed via
    ``condition_expression``.

    :param str table_name: The table to put the item to
    :param dict item: A map of attribute name/value pairs, one for each
        attribute. All primary key attributes are required; other
        attribute name-value pairs are optional. Attributes that are
        part of an index key must match the types declared in the
        table's attribute definitions.
    :param str condition_expression: A condition that must be satisfied
        in order for a conditional *PutItem* operation to succeed.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param dict expression_attribute_values: One or more values that can
        be substituted in an expression, as native Python values (they
        are marshalled into the DynamoDB wire format here).
    :param str return_consumed_capacity: Determines the level of detail
        about provisioned throughput consumption that is returned in the
        response. Should be ``None`` or one of ``INDEXES`` or ``TOTAL``
    :param str return_item_collection_metrics: Determines whether item
        collection metrics are returned (``SIZE`` or ``NONE``).
    :param str return_values: Use ``ReturnValues`` if you want to get
        the item attributes as they appeared before they were updated
        with the ``PutItem`` request.
    :rtype: tornado.concurrent.Future

    .. _PutItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_PutItem.html

    """
    payload = {'TableName': table_name, 'Item': utils.marshall(item)}
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        # Marshall native values into the DynamoDB wire format, matching
        # the behavior of update_item() and delete_item(); previously the
        # dict was sent unmarshalled, which DynamoDB rejects.
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if return_consumed_capacity:
        # Validate like the other item operations do instead of sending
        # an arbitrary value through.
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        # Use the requested value rather than an unconditional 'SIZE',
        # keeping parity with update_item() and delete_item().
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('PutItem', payload)
def get_item(self, table_name, key_dict,
             consistent_read=False,
             expression_attribute_names=None,
             projection_expression=None,
             return_consumed_capacity=None):
    """
    Invoke the `GetItem`_ function.

    :param str table_name: table to retrieve the item from
    :param dict key_dict: key to use for retrieval. This will
        be marshalled for you so a native :class:`dict` works.
    :param bool consistent_read: Determines the read consistency model:
        when :py:data:`True` the operation uses strongly consistent
        reads; otherwise eventually consistent reads are used.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param str projection_expression: A comma-separated string naming
        one or more attributes to retrieve from the table (scalars,
        sets, or elements of a JSON document). When omitted all
        attributes are returned; requested attributes that are not
        found simply do not appear in the result.
    :param str return_consumed_capacity: Level of detail about
        provisioned throughput consumption returned in the response:
        ``INDEXES`` (per-table and per-index capacity), ``TOTAL``
        (aggregate only), or ``NONE``.
    :rtype: tornado.concurrent.Future

    .. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_GetItem.html

    """
    payload = {
        'TableName': table_name,
        'Key': utils.marshall(key_dict),
        'ConsistentRead': consistent_read,
    }
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if projection_expression:
        payload['ProjectionExpression'] = projection_expression
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('GetItem', payload)
def update_item(self, table_name, key_dict,
                condition_expression=None,
                update_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=None):
    """Invoke the `UpdateItem`_ function.

    Edits an existing item's attributes, or adds a new item to the
    table if it does not already exist. You can put, delete, or add
    attribute values, and perform a conditional update on an existing
    item.

    :param str table_name: The name of the table that contains the item
        to update
    :param dict key_dict: A dictionary of key/value pairs defining the
        full primary key of the item (partition key, and sort key for a
        composite primary key).
    :param str condition_expression: A condition that must be satisfied
        in order for a conditional *UpdateItem* operation to succeed.
    :param str update_expression: An expression that defines one or
        more attributes to be updated, the action to be performed on
        them, and new value(s) for them.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param dict expression_attribute_values: One or more values that
        can be substituted in an expression.
    :param str return_consumed_capacity: Determines the level of detail
        about provisioned throughput consumption that is returned in
        the response.
    :param str return_item_collection_metrics: Determines whether item
        collection metrics are returned.
    :param str return_values: Use ReturnValues if you want to get the
        item attributes as they appeared either before or after they
        were updated.
    :rtype: tornado.concurrent.Future

    .. _UpdateItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateItem.html

    """
    payload = {'TableName': table_name,
               'Key': utils.marshall(key_dict)}
    # Only include UpdateExpression when one was supplied; previously a
    # null value was always sent, which DynamoDB rejects when the
    # parameter defaults to None.
    if update_expression:
        payload['UpdateExpression'] = update_expression
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('UpdateItem', payload)
def delete_item(self, table_name, key_dict,
                condition_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=False):
    """Invoke the `DeleteItem`_ function, deleting a single item in a
    table by primary key. A conditional delete (delete only if the item
    exists, or has an expected attribute value) can be performed via
    ``condition_expression``.

    :param str table_name: The name of the table from which to delete
        the item.
    :param dict key_dict: A map of attribute names to values that forms
        the full primary key of the item to delete (partition key, and
        sort key for a composite primary key).
    :param str condition_expression: A condition that must be satisfied
        in order for a conditional *DeleteItem* to succeed.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param dict expression_attribute_values: One or more values that
        can be substituted in an expression.
    :param str return_consumed_capacity: Determines the level of detail
        about provisioned throughput consumption that is returned in
        the response.
    :param str return_item_collection_metrics: Determines whether item
        collection metrics are returned.
    :param str return_values: Return the item attributes as they
        appeared before they were deleted.

    .. _DeleteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteItem.html

    """
    payload = {'TableName': table_name, 'Key': utils.marshall(key_dict)}
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        marshalled = utils.marshall(expression_attribute_values)
        payload['ExpressionAttributeValues'] = marshalled
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('DeleteItem', payload)
def batch_get_item(self):
    """Invoke the `BatchGetItem`_ function.

    .. _BatchGetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchGetItem.html

    """
    # Not yet implemented by this client.
    raise NotImplementedError
def batch_write_item(self):
    """Invoke the `BatchWriteItem`_ function.

    .. _BatchWriteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchWriteItem.html

    """
    # Not yet implemented by this client.
    raise NotImplementedError
def query(self, table_name,
          index_name=None,
          consistent_read=None,
          key_condition_expression=None,
          filter_expression=None,
          expression_attribute_names=None,
          expression_attribute_values=None,
          projection_expression=None,
          select=None,
          exclusive_start_key=None,
          limit=None,
          scan_index_forward=True,
          return_consumed_capacity=None):
    """Run a `Query`_ against a table or a secondary index.

    A query selects items via ``key_condition_expression`` — an
    equality test on the partition key, optionally combined with a
    comparison on the sort key — and may further narrow the results
    with ``filter_expression`` (applied after the read, so it does not
    reduce consumed capacity).

    :param str table_name: table containing the requested items
    :param str index_name: optional local or global secondary index to
        query instead of the base table
    :param bool consistent_read: use strongly consistent reads when
        ``True``; not supported on global secondary indexes
    :param str key_condition_expression: key selection criteria (see
        `KeyConditionExpression <https://docs.aws.amazon.com/
        amazondynamodb/latest/developerguide/
        Query.html#Query.KeyConditionExpressions>`_)
    :param str filter_expression: condition applied after the query
        but before results are returned
    :param dict expression_attribute_names: substitution tokens for
        attribute names used in expressions
    :param dict expression_attribute_values: values substituted into
        expressions; marshalled automatically
    :param str projection_expression: attributes to return
    :param str select: one of ``ALL_ATTRIBUTES``,
        ``ALL_PROJECTED_ATTRIBUTES`` or ``COUNT``
    :param dict exclusive_start_key: ``LastEvaluatedKey`` from a
        previous call, used for pagination
    :param int limit: maximum number of items to evaluate (not
        necessarily the number of matching items)
    :param bool scan_index_forward: traverse the index in ascending
        order when ``True`` (default), descending when ``False``
    :param str return_consumed_capacity: ``INDEXES``, ``TOTAL`` or
        ``NONE``
    :rtype: dict

    .. _Query: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_Query.html
    """
    payload = {'TableName': table_name,
               'ScanIndexForward': scan_index_forward}
    if index_name:
        payload['IndexName'] = index_name
    if consistent_read is not None:
        payload['ConsistentRead'] = consistent_read
    # Simple pass-through values that are only sent when truthy
    for field, value in (
            ('KeyConditionExpression', key_condition_expression),
            ('FilterExpression', filter_expression),
            ('ExpressionAttributeNames', expression_attribute_names)):
        if value:
            payload[field] = value
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = utils.marshall(
            expression_attribute_values)
    if projection_expression:
        payload['ProjectionExpression'] = projection_expression
    if select:
        _validate_select(select)
        payload['Select'] = select
    if exclusive_start_key:
        payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if limit:
        payload['Limit'] = limit
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Query', payload)
def scan(self,
         table_name,
         index_name=None,
         consistent_read=None,
         projection_expression=None,
         filter_expression=None,
         expression_attribute_names=None,
         expression_attribute_values=None,
         segment=None,
         total_segments=None,
         select=None,
         limit=None,
         exclusive_start_key=None,
         return_consumed_capacity=None):
    """Run a `Scan`_ over every item in a table or secondary index.

    If the scanned data exceeds the 1 MB data-set limit, the scan
    stops and ``LastEvaluatedKey`` in the result can be passed back
    as ``exclusive_start_key`` to continue in a subsequent call.
    Provide ``segment`` and ``total_segments`` to perform a parallel
    scan (see `Parallel Scan <http://docs.aws.amazon.com/
    amazondynamodb/latest/developerguide/
    QueryAndScan.html#QueryAndScanParallelScan>`_). Scans use
    eventually consistent reads unless ``consistent_read`` is set to
    ``True``.

    :rtype: dict

    .. _Scan: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_Scan.html
    """
    payload = {'TableName': table_name}
    if index_name:
        payload['IndexName'] = index_name
    if consistent_read is not None:
        payload['ConsistentRead'] = consistent_read
    # Simple pass-through values that are only sent when truthy
    for field, value in (
            ('FilterExpression', filter_expression),
            ('ExpressionAttributeNames', expression_attribute_names)):
        if value:
            payload[field] = value
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = utils.marshall(
            expression_attribute_values)
    if projection_expression:
        payload['ProjectionExpression'] = projection_expression
    for field, value in (('Segment', segment),
                         ('TotalSegments', total_segments)):
        if value:
            payload[field] = value
    if select:
        _validate_select(select)
        payload['Select'] = select
    if exclusive_start_key:
        payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if limit:
        payload['Limit'] = limit
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Scan', payload)
@gen.coroutine
def execute(self, action, parameters):
    """Execute a DynamoDB action with the given parameters.

    The method will retry requests that failed due to OS level errors
    or when being throttled by DynamoDB, sleeping an exponentially
    increasing duration between attempts (see :meth:`_sleep_duration`).

    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :rtype: tornado.concurrent.Future

    This method creates a future that will resolve to the result
    of calling the specified DynamoDB function. It does its best
    to unwrap the response from the function to make life a little
    easier for you. It does this for the ``GetItem`` and ``Query``
    functions currently.

    :raises:
        :exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
        :exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
        :exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
        :exc:`~sprockets_dynamodb.exceptions.NoProfileError`
        :exc:`~sprockets_dynamodb.exceptions.TimeoutException`
        :exc:`~sprockets_dynamodb.exceptions.RequestException`
        :exc:`~sprockets_dynamodb.exceptions.InternalFailure`
        :exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
        :exc:`~sprockets_dynamodb.exceptions.MissingParameter`
        :exc:`~sprockets_dynamodb.exceptions.OptInRequired`
        :exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
        :exc:`~sprockets_dynamodb.exceptions.RequestExpired`
        :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
        :exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
        :exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
        :exc:`~sprockets_dynamodb.exceptions.ValidationException`
    """
    # Bounded deque: only the most recent _max_retries measurements
    # are retained for the instrumentation callback.
    measurements = collections.deque([], self._max_retries)
    for attempt in range(1, self._max_retries + 1):
        try:
            result = yield self._execute(
                action, parameters, attempt, measurements)
        # Transient failures: back off and retry up to _max_retries.
        except (exceptions.InternalServerError,
                exceptions.RequestException,
                exceptions.ThrottlingException,
                exceptions.ThroughputExceeded,
                exceptions.ServiceUnavailable) as error:
            if attempt == self._max_retries:
                # Out of retries: flush measurements, then raise or
                # hand off to the configured error callback.
                if self._instrumentation_callback:
                    self._instrumentation_callback(measurements)
                self._on_exception(error)
            # NOTE(review): when _on_exception delegates to a callback
            # instead of raising, control falls through to the sleep
            # below even on the final attempt — confirm intended.
            duration = self._sleep_duration(attempt)
            self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
                                error, attempt, duration)
            yield gen.sleep(duration)
        # Non-retryable DynamoDB errors fail immediately.
        except exceptions.DynamoDBException as error:
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self._on_exception(error)
        else:
            # Success: report measurements and unwrap the response.
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self.logger.debug('%s result: %r', action, result)
            raise gen.Return(_unwrap_result(action, result))
def set_error_callback(self, callback):
    """Register *callback* to be invoked, instead of raising, when a
    request encounters an unrecoverable error during action execution.

    :param method callback: The method to invoke
    """
    self.logger.debug('Setting error callback: %r', callback)
    self._on_error = callback
def _execute(self, action, parameters, attempt, measurements):
    """Issue a single HTTP request for a DynamoDB *action*.

    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :param int attempt: which attempt number this request represents
    :param list measurements: accumulator for per-request measurements
    :rtype: tornado.concurrent.Future
    """
    result_future = concurrent.Future()
    started_at = time.time()
    table = parameters.get('TableName', 'Unknown')

    def on_fetch_complete(request):
        """Resolve ``result_future`` once the HTTP request finishes.

        :param tornado.concurrent.Future request: The request future
        """
        self._on_response(action, table, attempt, started_at, request,
                          result_future, measurements)

    request_future = self._client.fetch(
        'POST', '/',
        body=json.dumps(parameters).encode('utf-8'),
        headers={
            'x-amz-target': 'DynamoDB_20120810.{}'.format(action),
            'Content-Type': 'application/x-amz-json-1.0',
        })
    ioloop.IOLoop.current().add_future(request_future, on_fetch_complete)
    return result_future
def _on_exception(self, error):
"""Handle exceptions that can not be retried.
:param error: The exception that was raised
:type error: sprockets_dynamodb.exceptions.DynamoDBException
"""
if not self._on_error:
raise error
self._on_error(error)
def _on_response(self, action, table, attempt, start, response, future,
                 measurements):
    """Invoked when the HTTP request to DynamoDB has returned.

    Responsible for setting the future result or exception based upon
    the HTTP response provided, translating lower-level errors into
    this package's exception hierarchy, and recording a
    :class:`Measurement` for the attempt regardless of outcome.

    :param str action: The action that was taken
    :param str table: The table name the action was made against
    :param int attempt: The attempt number for the action
    :param float start: When the request was submitted
    :param tornado.concurrent.Future response: The HTTP request future
    :param tornado.concurrent.Future future: The action execution future
    :param list measurements: The measurement accumulator
    """
    self.logger.debug('%s on %s request #%i = %r',
                      action, table, attempt, response)
    now, exception = time.time(), None
    try:
        # _process_response raises on any transport or AWS error;
        # the except clauses below map those onto package exceptions.
        future.set_result(self._process_response(response))
    except aws_exceptions.ConfigNotFound as error:
        exception = exceptions.ConfigNotFound(str(error))
    except aws_exceptions.ConfigParserError as error:
        exception = exceptions.ConfigParserError(str(error))
    except aws_exceptions.NoCredentialsError as error:
        exception = exceptions.NoCredentialsError(str(error))
    except aws_exceptions.NoProfileError as error:
        exception = exceptions.NoProfileError(str(error))
    # Generic AWSError must come after its more specific subclasses.
    except aws_exceptions.AWSError as error:
        exception = exceptions.DynamoDBException(error)
    # Socket/SSL/OS-level failures become retryable RequestExceptions.
    except (ConnectionError, ConnectionResetError, OSError,
            aws_exceptions.RequestException, ssl.SSLError,
            _select.error, ssl.socket_error, socket.gaierror) as error:
        exception = exceptions.RequestException(str(error))
    except TimeoutError:
        exception = exceptions.TimeoutException()
    except httpclient.HTTPError as error:
        # Tornado uses code 599 for client-side connection timeouts.
        if error.code == 599:
            exception = exceptions.TimeoutException()
        else:
            exception = exceptions.RequestException(
                getattr(getattr(error, 'response', error),
                        'body', str(error.code)))
    except Exception as error:
        # Last resort: propagate anything unanticipated as-is.
        exception = error
    if exception:
        future.set_exception(exception)
    # Always record the attempt; max(now, start) guards against a
    # clock that moved backwards producing a negative duration. The
    # final field is the exception class name, or None on success.
    measurements.append(
        Measurement(now, action, table, attempt, max(now, start) - start,
                    exception.__class__.__name__
                    if exception else exception))
@staticmethod
def _process_response(response):
    """Turn the raw AWS response future into a decoded payload,
    translating mapped AWS error types into package exceptions.

    :param tornado.concurrent.Future response: The request future
    :rtype: dict or list
    :raises: sprockets_dynamodb.exceptions.DynamoDBException
    """
    error = response.exception()
    if error:
        if isinstance(error, aws_exceptions.AWSError):
            error_type = error.args[1]['type']
            if error_type in exceptions.MAP:
                raise exceptions.MAP[error_type](
                    error.args[1]['message'])
        raise error
    http_response = response.result()
    if http_response and http_response.body:
        return json.loads(http_response.body.decode('utf-8'))
    raise exceptions.DynamoDBException('empty response')
@staticmethod
def _sleep_duration(attempt):
"""Calculates how long to sleep between exceptions. Returns a value
in seconds.
:param int attempt: The attempt number
:rtype: float
"""
return (float(2 ** attempt) * 100) / 1000
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/client.py
|
Client._execute
|
python
|
def _execute(self, action, parameters, attempt, measurements):
future = concurrent.Future()
start = time.time()
def handle_response(request):
"""Invoked by the IOLoop when fetch has a response to process.
:param tornado.concurrent.Future request: The request future
"""
self._on_response(
action, parameters.get('TableName', 'Unknown'), attempt,
start, request, future, measurements)
ioloop.IOLoop.current().add_future(self._client.fetch(
'POST', '/',
body=json.dumps(parameters).encode('utf-8'),
headers={
'x-amz-target': 'DynamoDB_20120810.{}'.format(action),
'Content-Type': 'application/x-amz-json-1.0',
}), handle_response)
return future
|
Invoke a DynamoDB action
:param str action: DynamoDB action to invoke
:param dict parameters: parameters to send into the action
:param int attempt: Which attempt number this is
:param list measurements: A list for accumulating request measurements
:rtype: tornado.concurrent.Future
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/client.py#L812-L842
| null |
class Client(object):
"""
Asynchronous DynamoDB Client
:keyword str region: AWS region to send requests to
:keyword str access_key: AWS access key. If unspecified, this
defaults to the :envvar:`AWS_ACCESS_KEY_ID` environment
variable and will fall back to using the AWS CLI credentials
file. See :class:`tornado_aws.client.AsyncAWSClient` for
more details.
:keyword str secret_key: AWS secret used to secure API calls.
If unspecified, this defaults to the :envvar:`AWS_SECRET_ACCESS_KEY`
environment variable and will fall back to using the AWS CLI
credentials as described in :class:`tornado_aws.client.AsyncAWSClient`.
:keyword str profile: optional profile to use in AWS API calls.
If unspecified, this defaults to the :envvar:`AWS_DEFAULT_PROFILE`
environment variable or ``default`` if unset.
:keyword str endpoint: DynamoDB endpoint to contact. If unspecified,
the default is determined by the region.
:keyword int max_clients: optional maximum number of HTTP requests
that may be performed in parallel.
:keyword int max_retries: Maximum number of times to retry a request when
if fails under certain conditions. Can also be set with the
:envvar:`DYNAMODB_MAX_RETRIES` environment variable.
:keyword method instrumentation_callback: A method that is invoked with a
list of measurements that were collected during the execution of an
individual action.
:keyword method on_error_callback: A method that is invoked when there is
a request exception that can not automatically be retried or the
maximum number of retries has been exceeded for a request.
Any of the methods invoked in the client can raise the following
exceptions:
- :exc:`sprockets_dynamodb.exceptions.DynamoDBException`
- :exc:`sprockets_dynamodb.exceptions.ConfigNotFound`
- :exc:`sprockets_dynamodb.exceptions.NoCredentialsError`
- :exc:`sprockets_dynamodb.exceptions.NoProfileError`
- :exc:`sprockets_dynamodb.exceptions.TimeoutException`
- :exc:`sprockets_dynamodb.exceptions.RequestException`
- :exc:`sprockets_dynamodb.exceptions.InternalFailure`
- :exc:`sprockets_dynamodb.exceptions.LimitExceeded`
- :exc:`sprockets_dynamodb.exceptions.MissingParameter`
- :exc:`sprockets_dynamodb.exceptions.OptInRequired`
- :exc:`sprockets_dynamodb.exceptions.ResourceInUse`
- :exc:`sprockets_dynamodb.exceptions.RequestExpired`
- :exc:`sprockets_dynamodb.exceptions.ServiceUnavailable`
- :exc:`sprockets_dynamodb.exceptions.ValidationException`
Create an instance of this class to interact with a DynamoDB
server. A :class:`tornado_aws.client.AsyncAWSClient` instance
implements the AWS API wrapping and this class provides the
DynamoDB specifics.
"""
DEFAULT_MAX_RETRIES = 3
def __init__(self, **kwargs):
    """Create a new asynchronous DynamoDB client.

    See the class docstring for the supported keyword arguments.
    The :envvar:`DYNAMODB_ENDPOINT` environment variable provides a
    default ``endpoint`` and :envvar:`DYNAMODB_MAX_RETRIES` a default
    ``max_retries``.
    """
    self.logger = LOGGER.getChild(self.__class__.__name__)
    if os.environ.get('DYNAMODB_ENDPOINT', None):
        kwargs.setdefault('endpoint', os.environ['DYNAMODB_ENDPOINT'])
    self._client = tornado_aws.AsyncAWSClient('dynamodb', **kwargs)
    self._ioloop = kwargs.get('io_loop', ioloop.IOLoop.current())
    # Coerce to int: os.environ.get always returns a string when the
    # variable is set, and the retry loop in execute() does integer
    # arithmetic on this value (range(1, self._max_retries + 1)).
    self._max_retries = int(kwargs.get(
        'max_retries', os.environ.get(
            'DYNAMODB_MAX_RETRIES', self.DEFAULT_MAX_RETRIES)))
    self._instrumentation_callback = kwargs.get('instrumentation_callback')
    self._on_error = kwargs.get('on_error_callback')
def create_table(self, table_definition):
    """Invoke the `CreateTable`_ function with *table_definition*.

    :param dict table_definition: description of the table to create,
        in the form documented by `CreateTable`_
    :rtype: tornado.concurrent.Future

    .. _CreateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_CreateTable.html
    """
    return self.execute('CreateTable', table_definition)
def update_table(self, table_definition):
    """Placeholder for the DynamoDB `UpdateTable`_ operation.

    *UpdateTable* modifies provisioned throughput settings, global
    secondary indexes, or DynamoDB Streams settings for a table; only
    one such change may be made per call. While the update executes
    the table status moves from ``ACTIVE`` to ``UPDATING`` and no
    further *UpdateTable* requests may be issued until it returns to
    ``ACTIVE``.

    Not yet supported by this client.

    :param dict table_definition: description of the table changes,
        in the form documented by `UpdateTable`_
    :raises: NotImplementedError

    .. _UpdateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateTable.html
    """
    raise NotImplementedError
def delete_table(self, table_name):
    """Invoke the `DeleteTable`_ function, removing *table_name* and
    all of its items.

    The table remains in the ``DELETING`` state until DynamoDB
    completes the removal. Deleting a table that is ``CREATING`` or
    ``UPDATING`` raises
    :py:exc:`~sprockets_dynamodb.exceptions.ResourceInUse`; a table
    that does not exist raises
    :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`; a table
    already in the ``DELETING`` state is not an error.

    :param str table_name: name of the table to delete
    :rtype: tornado.concurrent.Future

    .. _DeleteTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteTable.html
    """
    return self.execute('DeleteTable', {'TableName': table_name})
def describe_table(self, table_name):
    """Invoke the `DescribeTable`_ function for *table_name*.

    :param str table_name: name of the table to describe
    :rtype: tornado.concurrent.Future

    .. _DescribeTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DescribeTable.html
    """
    return self.execute('DescribeTable', {'TableName': table_name})
def list_tables(self, exclusive_start_table_name=None, limit=None):
    """Invoke the `ListTables`_ function.

    Returns the table names associated with the current account and
    endpoint. The output is paginated, with each page returning at
    most 100 table names.

    :param str exclusive_start_table_name: the first table name to
        evaluate; pass the ``LastEvaluatedTableName`` from a previous
        call to obtain the next page of results
    :param int limit: maximum number of table names to return
        (defaults to 100 when unspecified)

    .. _ListTables: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_ListTables.html
    """
    candidates = {'ExclusiveStartTableName': exclusive_start_table_name,
                  'Limit': limit}
    payload = {key: value for key, value in candidates.items() if value}
    return self.execute('ListTables', payload)
def put_item(self, table_name, item,
             condition_expression=None,
             expression_attribute_names=None,
             expression_attribute_values=None,
             return_consumed_capacity=None,
             return_item_collection_metrics=None,
             return_values=None):
    """Invoke the `PutItem`_ function, creating a new item or replacing
    an existing item that has the same primary key.

    You can perform a conditional put (add a new item only if one with
    the specified primary key does not exist) or replace an existing
    item if it has certain attribute values.

    :param str table_name: The table to put the item to
    :param dict item: A map of attribute name/value pairs, one for each
        attribute. All primary key attributes are required (both the
        partition key and, for a composite key, the sort key); other
        attributes are optional. Marshalled automatically.
    :param str condition_expression: A condition that must be satisfied
        for the put to succeed. See the `AWS documentation for
        ConditionExpression <http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_PutItem.html#DDB-PutItem-request-
        ConditionExpression>`_.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param dict expression_attribute_values: One or more values that
        can be substituted in an expression; marshalled automatically.
    :param str return_consumed_capacity: Determines the level of detail
        about provisioned throughput consumption that is returned in
        the response. Should be ``None`` or one of ``INDEXES``,
        ``TOTAL`` or ``NONE``.
    :param str return_item_collection_metrics: Determines whether item
        collection metrics are returned.
    :param str return_values: Use if you want to get the item
        attributes as they appeared before they were updated with the
        ``PutItem`` request.
    :rtype: tornado.concurrent.Future

    .. _PutItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_PutItem.html
    """
    payload = {'TableName': table_name, 'Item': utils.marshall(item)}
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        # Marshall into DynamoDB's wire format, consistent with
        # delete_item / query / scan.
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        # Validate and pass the caller's value through instead of
        # unconditionally forcing 'SIZE', matching delete_item.
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('PutItem', payload)
def get_item(self, table_name, key_dict,
             consistent_read=False,
             expression_attribute_names=None,
             projection_expression=None,
             return_consumed_capacity=None):
    """Invoke the `GetItem`_ function.

    :param str table_name: table to retrieve the item from
    :param dict key_dict: key used for retrieval; marshalled for you,
        so a native :class:`dict` works
    :param bool consistent_read: use strongly consistent reads when
        ``True``, eventually consistent reads otherwise
    :param dict expression_attribute_names: substitution tokens for
        attribute names used in expressions
    :param str projection_expression: comma-separated attributes to
        retrieve (scalars, sets, or elements of a JSON document); all
        attributes are returned when omitted, and requested attributes
        that are absent simply do not appear in the result
    :param str return_consumed_capacity: ``INDEXES``, ``TOTAL`` or
        ``NONE``
    :rtype: tornado.concurrent.Future

    .. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_GetItem.html
    """
    payload = {
        'TableName': table_name,
        'Key': utils.marshall(key_dict),
        'ConsistentRead': consistent_read,
    }
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if projection_expression:
        payload['ProjectionExpression'] = projection_expression
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('GetItem', payload)
def update_item(self, table_name, key_dict,
                condition_expression=None,
                update_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=None):
    """Invoke the `UpdateItem`_ function.

    Edits the attributes of an existing item, or adds a new item to the
    table if no item with the given key exists. Supports conditional
    updates via ``condition_expression``.

    :param str table_name: The table that contains the item to update.
    :param dict key_dict: The full primary key of the item (partition
        key, plus sort key for composite keys).
    :param str condition_expression: Condition that must be satisfied
        for the update to succeed.
    :param str update_expression: Expression defining the attributes to
        update, the action to perform, and the new value(s).
    :param dict expression_attribute_names: Substitution tokens for
        attribute names used in expressions.
    :param dict expression_attribute_values: Values that can be
        substituted into expressions.
    :param str return_consumed_capacity: Level of consumed-capacity
        detail to include in the response.
    :param str return_item_collection_metrics: Whether item collection
        metrics are returned.
    :param str return_values: Return the item attributes as they
        appeared either before or after the update.
    :rtype: tornado.concurrent.Future

    .. _UpdateItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateItem.html

    """
    request = {
        'TableName': table_name,
        'Key': utils.marshall(key_dict),
        'UpdateExpression': update_expression,
    }
    if condition_expression:
        request['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        request['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        request['ExpressionAttributeValues'] = utils.marshall(
            expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        request['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        request['ReturnValues'] = return_values
    return self.execute('UpdateItem', request)
def delete_item(self, table_name, key_dict,
                condition_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=False):
    """Invoke the `DeleteItem`_ function, removing a single item from a
    table by primary key, optionally guarded by a condition expression.

    :param str table_name: The table to delete the item from.
    :param dict key_dict: The full primary key of the item to delete
        (partition key, plus sort key for composite keys).
    :param str condition_expression: Condition that must be satisfied
        for the delete to succeed.
    :param dict expression_attribute_names: Substitution tokens for
        attribute names used in expressions.
    :param dict expression_attribute_values: Values that can be
        substituted into expressions.
    :param str return_consumed_capacity: Level of consumed-capacity
        detail to include in the response.
    :param str return_item_collection_metrics: Whether item collection
        metrics are returned.
    :param str return_values: Return the item attributes as they
        appeared before the item was deleted.

    .. _DeleteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteItem.html

    """
    request = {'TableName': table_name, 'Key': utils.marshall(key_dict)}
    if condition_expression:
        request['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        request['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        request['ExpressionAttributeValues'] = utils.marshall(
            expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        request['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        request['ReturnValues'] = return_values
    return self.execute('DeleteItem', request)
def batch_get_item(self):
    """Invoke the `BatchGetItem`_ function.

    Not implemented yet; calling this always raises
    :exc:`NotImplementedError`.

    .. _BatchGetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchGetItem.html

    """
    raise NotImplementedError
def batch_write_item(self):
    """Invoke the `BatchWriteItem`_ function.

    Not implemented yet; calling this always raises
    :exc:`NotImplementedError`.

    .. _BatchWriteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchWriteItem.html

    """
    raise NotImplementedError
def query(self, table_name,
          index_name=None,
          consistent_read=None,
          key_condition_expression=None,
          filter_expression=None,
          expression_attribute_names=None,
          expression_attribute_values=None,
          projection_expression=None,
          select=None,
          exclusive_start_key=None,
          limit=None,
          scan_index_forward=True,
          return_consumed_capacity=None):
    """A `Query`_ operation uses the primary key of a table or a secondary
    index to directly access items from that table or index.

    :param str table_name: The table containing the requested items.
    :param str index_name: Name of a local or global secondary index to
        query instead of the base table.
    :param bool consistent_read: Use strongly consistent reads when
        ``True``; strongly consistent reads are not supported on global
        secondary indexes.
    :param str key_condition_expression: Condition specifying the key
        value(s) for items to retrieve. Must perform an equality test on
        the partition key and may add a comparison on the sort key.
    :param str filter_expression: Condition applied after the query but
        before results are returned; does not reduce consumed capacity.
    :param dict expression_attribute_names: Substitution tokens for
        attribute names used in expressions.
    :param dict expression_attribute_values: Values that can be
        substituted into expressions.
    :param str projection_expression: Comma-separated attributes to
        retrieve; all attributes are returned when omitted.
    :param str select: The attributes to return: ``ALL_ATTRIBUTES``,
        ``ALL_PROJECTED_ATTRIBUTES``, or ``COUNT``.
    :param dict exclusive_start_key: Primary key of the first item to
        evaluate; use the previous response's ``LastEvaluatedKey``.
    :param int limit: Maximum number of items to evaluate; paging
        continues via ``LastEvaluatedKey``.
    :param bool scan_index_forward: Traverse the index in ascending
        order when ``True`` (default), descending when ``False``.
    :param str return_consumed_capacity: Level of consumed-capacity
        detail to include in the response (``INDEXES``, ``TOTAL``, or
        ``NONE``).
    :rtype: dict

    .. _Query: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_Query.html

    """
    request = {'TableName': table_name,
               'ScanIndexForward': scan_index_forward}
    # Optional pass-through parameters that require no marshalling or
    # validation; included only when truthy, matching the API contract.
    for field, value in (
            ('IndexName', index_name),
            ('KeyConditionExpression', key_condition_expression),
            ('FilterExpression', filter_expression),
            ('ExpressionAttributeNames', expression_attribute_names),
            ('ProjectionExpression', projection_expression),
            ('Limit', limit)):
        if value:
            request[field] = value
    # ``False`` is a meaningful value here, so test against None.
    if consistent_read is not None:
        request['ConsistentRead'] = consistent_read
    if expression_attribute_values:
        request['ExpressionAttributeValues'] = utils.marshall(
            expression_attribute_values)
    if select:
        _validate_select(select)
        request['Select'] = select
    if exclusive_start_key:
        request['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Query', request)
def scan(self,
         table_name,
         index_name=None,
         consistent_read=None,
         projection_expression=None,
         filter_expression=None,
         expression_attribute_names=None,
         expression_attribute_values=None,
         segment=None,
         total_segments=None,
         select=None,
         limit=None,
         exclusive_start_key=None,
         return_consumed_capacity=None):
    """The `Scan`_ operation returns one or more items and item attributes
    by accessing every item in a table or a secondary index.

    If the total number of scanned items exceeds the 1 MB data set size
    limit, the scan stops and a ``LastEvaluatedKey`` is returned so the
    scan can be continued in a subsequent operation. For faster
    performance on a large table or secondary index, applications can
    request a parallel scan by providing the ``segment`` and
    ``total_segments`` parameters.

    By default *Scan* uses eventually consistent reads; pass
    ``consistent_read=True`` for a consistent snapshot as of the start of
    the scan.

    :param str table_name: The table to scan.
    :param str index_name: Name of a secondary index to scan instead of
        the base table.
    :param bool consistent_read: Use strongly consistent reads when
        ``True``.
    :param str projection_expression: Comma-separated attributes to
        retrieve; all attributes are returned when omitted.
    :param str filter_expression: Condition applied after the scan but
        before results are returned.
    :param dict expression_attribute_names: Substitution tokens for
        attribute names used in expressions.
    :param dict expression_attribute_values: Values that can be
        substituted into expressions.
    :param int segment: Zero-based segment identifier for a parallel
        scan; requires ``total_segments``.
    :param int total_segments: Total number of segments in a parallel
        scan.
    :param str select: The attributes to return (``ALL_ATTRIBUTES``,
        ``ALL_PROJECTED_ATTRIBUTES``, or ``COUNT``).
    :param int limit: Maximum number of items to evaluate.
    :param dict exclusive_start_key: Primary key of the first item to
        evaluate; use the previous response's ``LastEvaluatedKey``.
    :param str return_consumed_capacity: Level of consumed-capacity
        detail to include in the response.
    :rtype: dict

    .. _Scan: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_Scan.html

    """
    payload = {'TableName': table_name}
    if index_name:
        payload['IndexName'] = index_name
    if consistent_read is not None:
        payload['ConsistentRead'] = consistent_read
    if filter_expression:
        payload['FilterExpression'] = filter_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if projection_expression:
        payload['ProjectionExpression'] = projection_expression
    # Segment IDs are zero-based, so 0 is a valid value; a plain
    # truthiness test would silently drop the first segment of a
    # parallel scan.
    if segment is not None:
        payload['Segment'] = segment
    if total_segments:
        payload['TotalSegments'] = total_segments
    if select:
        _validate_select(select)
        payload['Select'] = select
    if exclusive_start_key:
        payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if limit:
        payload['Limit'] = limit
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Scan', payload)
@gen.coroutine
def execute(self, action, parameters):
    """
    Execute a DynamoDB action with the given parameters. The method will
    retry requests that failed due to OS level errors or when being
    throttled by DynamoDB.

    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :rtype: tornado.concurrent.Future

    This method creates a future that will resolve to the result
    of calling the specified DynamoDB function. It does its best
    to unwrap the response from the function to make life a little
    easier for you. It does this for the ``GetItem`` and ``Query``
    functions currently.

    :raises:
        :exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
        :exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
        :exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
        :exc:`~sprockets_dynamodb.exceptions.NoProfileError`
        :exc:`~sprockets_dynamodb.exceptions.TimeoutException`
        :exc:`~sprockets_dynamodb.exceptions.RequestException`
        :exc:`~sprockets_dynamodb.exceptions.InternalFailure`
        :exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
        :exc:`~sprockets_dynamodb.exceptions.MissingParameter`
        :exc:`~sprockets_dynamodb.exceptions.OptInRequired`
        :exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
        :exc:`~sprockets_dynamodb.exceptions.RequestExpired`
        :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
        :exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
        :exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
        :exc:`~sprockets_dynamodb.exceptions.ValidationException`

    """
    # Bounded buffer of per-attempt timing data; maxlen keeps only the
    # last _max_retries measurements.
    measurements = collections.deque([], self._max_retries)
    for attempt in range(1, self._max_retries + 1):
        try:
            result = yield self._execute(
                action, parameters, attempt, measurements)
        # Transient/retryable failures: back off exponentially and try
        # again, up to _max_retries attempts in total.
        except (exceptions.InternalServerError,
                exceptions.RequestException,
                exceptions.ThrottlingException,
                exceptions.ThroughputExceeded,
                exceptions.ServiceUnavailable) as error:
            if attempt == self._max_retries:
                # Out of retries: flush measurements and dispatch the
                # error (raises unless an error callback is set).
                # NOTE(review): if the error callback swallows the final
                # error, the loop ends and the coroutine resolves to
                # None -- confirm that is the intended contract.
                if self._instrumentation_callback:
                    self._instrumentation_callback(measurements)
                self._on_exception(error)
            duration = self._sleep_duration(attempt)
            self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
                                error, attempt, duration)
            yield gen.sleep(duration)
        # Any other DynamoDB error is non-retryable; dispatch at once.
        except exceptions.DynamoDBException as error:
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self._on_exception(error)
        else:
            # Success: report measurements and resolve the coroutine
            # with the unwrapped response.
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self.logger.debug('%s result: %r', action, result)
            raise gen.Return(_unwrap_result(action, result))
def set_error_callback(self, callback):
    """Register *callback* to be invoked when a request hits an
    unrecoverable error during action execution.

    :param method callback: The method to invoke

    """
    self.logger.debug('Setting error callback: %r', callback)
    self._on_error = callback
def set_instrumentation_callback(self, callback):
    """Register *callback* to be invoked with the collected measurements
    once a request has finished gathering them.

    :param method callback: The method to invoke

    """
    self.logger.debug('Setting instrumentation callback: %r', callback)
    self._instrumentation_callback = callback
def _on_exception(self, error):
"""Handle exceptions that can not be retried.
:param error: The exception that was raised
:type error: sprockets_dynamodb.exceptions.DynamoDBException
"""
if not self._on_error:
raise error
self._on_error(error)
def _on_response(self, action, table, attempt, start, response, future,
                 measurements):
    """Invoked when the HTTP request to the DynamoDB has returned and
    is responsible for setting the future result or exception based upon
    the HTTP response provided. Also appends a ``Measurement`` record
    for the attempt to *measurements*.

    :param str action: The action that was taken
    :param str table: The table name the action was made against
    :param int attempt: The attempt number for the action
    :param float start: When the request was submitted
    :param tornado.concurrent.Future response: The HTTP request future
    :param tornado.concurrent.Future future: The action execution future
    :param list measurements: The measurement accumulator

    """
    self.logger.debug('%s on %s request #%i = %r',
                      action, table, attempt, response)
    now, exception = time.time(), None
    try:
        future.set_result(self._process_response(response))
    # Translate AWS client configuration/credential errors into this
    # package's exception hierarchy so callers depend on one set of
    # exception types.
    except aws_exceptions.ConfigNotFound as error:
        exception = exceptions.ConfigNotFound(str(error))
    except aws_exceptions.ConfigParserError as error:
        exception = exceptions.ConfigParserError(str(error))
    except aws_exceptions.NoCredentialsError as error:
        exception = exceptions.NoCredentialsError(str(error))
    except aws_exceptions.NoProfileError as error:
        exception = exceptions.NoProfileError(str(error))
    # Any other AWS-level failure becomes the generic wrapper.
    except aws_exceptions.AWSError as error:
        exception = exceptions.DynamoDBException(error)
    # Socket/transport-level failures map to the retryable
    # RequestException.
    # NOTE(review): ``ssl.socket_error`` and ``_select.error`` look like
    # Python 2-era aliases -- confirm they exist on the target runtime.
    except (ConnectionError, ConnectionResetError, OSError,
            aws_exceptions.RequestException, ssl.SSLError,
            _select.error, ssl.socket_error, socket.gaierror) as error:
        exception = exceptions.RequestException(str(error))
    except TimeoutError:
        exception = exceptions.TimeoutException()
    except httpclient.HTTPError as error:
        # Tornado reports client-side request timeouts as HTTP 599.
        if error.code == 599:
            exception = exceptions.TimeoutException()
        else:
            exception = exceptions.RequestException(
                getattr(getattr(error, 'response', error),
                        'body', str(error.code)))
    # Last-resort catch-all so the action future is always resolved; the
    # unexpected error is propagated via set_exception below.
    except Exception as error:
        exception = error
    if exception:
        future.set_exception(exception)
    # Record timing for instrumentation; max() guards against a clock
    # adjustment producing a negative duration.
    measurements.append(
        Measurement(now, action, table, attempt, max(now, start) - start,
                    exception.__class__.__name__
                    if exception else exception))
@staticmethod
def _process_response(response):
"""Process the raw AWS response, returning either the mapped exception
or deserialized response.
:param tornado.concurrent.Future response: The request future
:rtype: dict or list
:raises: sprockets_dynamodb.exceptions.DynamoDBException
"""
error = response.exception()
if error:
if isinstance(error, aws_exceptions.AWSError):
if error.args[1]['type'] in exceptions.MAP:
raise exceptions.MAP[error.args[1]['type']](
error.args[1]['message'])
raise error
http_response = response.result()
if not http_response or not http_response.body:
raise exceptions.DynamoDBException('empty response')
return json.loads(http_response.body.decode('utf-8'))
@staticmethod
def _sleep_duration(attempt):
"""Calculates how long to sleep between exceptions. Returns a value
in seconds.
:param int attempt: The attempt number
:rtype: float
"""
return (float(2 ** attempt) * 100) / 1000
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/client.py
|
Client._on_response
|
python
|
def _on_response(self, action, table, attempt, start, response, future,
measurements):
self.logger.debug('%s on %s request #%i = %r',
action, table, attempt, response)
now, exception = time.time(), None
try:
future.set_result(self._process_response(response))
except aws_exceptions.ConfigNotFound as error:
exception = exceptions.ConfigNotFound(str(error))
except aws_exceptions.ConfigParserError as error:
exception = exceptions.ConfigParserError(str(error))
except aws_exceptions.NoCredentialsError as error:
exception = exceptions.NoCredentialsError(str(error))
except aws_exceptions.NoProfileError as error:
exception = exceptions.NoProfileError(str(error))
except aws_exceptions.AWSError as error:
exception = exceptions.DynamoDBException(error)
except (ConnectionError, ConnectionResetError, OSError,
aws_exceptions.RequestException, ssl.SSLError,
_select.error, ssl.socket_error, socket.gaierror) as error:
exception = exceptions.RequestException(str(error))
except TimeoutError:
exception = exceptions.TimeoutException()
except httpclient.HTTPError as error:
if error.code == 599:
exception = exceptions.TimeoutException()
else:
exception = exceptions.RequestException(
getattr(getattr(error, 'response', error),
'body', str(error.code)))
except Exception as error:
exception = error
if exception:
future.set_exception(exception)
measurements.append(
Measurement(now, action, table, attempt, max(now, start) - start,
exception.__class__.__name__
if exception else exception))
|
Invoked when the HTTP request to the DynamoDB has returned and
is responsible for setting the future result or exception based upon
the HTTP response provided.
:param str action: The action that was taken
:param str table: The table name the action was made against
:param int attempt: The attempt number for the action
:param float start: When the request was submitted
:param tornado.concurrent.Future response: The HTTP request future
:param tornado.concurrent.Future future: The action execution future
:param list measurements: The measurement accumulator
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/client.py#L855-L907
| null |
class Client(object):
"""
Asynchronous DynamoDB Client
:keyword str region: AWS region to send requests to
:keyword str access_key: AWS access key. If unspecified, this
defaults to the :envvar:`AWS_ACCESS_KEY_ID` environment
variable and will fall back to using the AWS CLI credentials
file. See :class:`tornado_aws.client.AsyncAWSClient` for
more details.
:keyword str secret_key: AWS secret used to secure API calls.
If unspecified, this defaults to the :envvar:`AWS_SECRET_ACCESS_KEY`
environment variable and will fall back to using the AWS CLI
credentials as described in :class:`tornado_aws.client.AsyncAWSClient`.
:keyword str profile: optional profile to use in AWS API calls.
If unspecified, this defaults to the :envvar:`AWS_DEFAULT_PROFILE`
environment variable or ``default`` if unset.
:keyword str endpoint: DynamoDB endpoint to contact. If unspecified,
the default is determined by the region.
:keyword int max_clients: optional maximum number of HTTP requests
that may be performed in parallel.
:keyword int max_retries: Maximum number of times to retry a request
if it fails under certain conditions. Can also be set with the
:envvar:`DYNAMODB_MAX_RETRIES` environment variable.
:keyword method instrumentation_callback: A method that is invoked with a
list of measurements that were collected during the execution of an
individual action.
:keyword method on_error_callback: A method that is invoked when there is
a request exception that can not automatically be retried or the
maximum number of retries has been exceeded for a request.
Any of the methods invoked in the client can raise the following
exceptions:
- :exc:`sprockets_dynamodb.exceptions.DynamoDBException`
- :exc:`sprockets_dynamodb.exceptions.ConfigNotFound`
- :exc:`sprockets_dynamodb.exceptions.NoCredentialsError`
- :exc:`sprockets_dynamodb.exceptions.NoProfileError`
- :exc:`sprockets_dynamodb.exceptions.TimeoutException`
- :exc:`sprockets_dynamodb.exceptions.RequestException`
- :exc:`sprockets_dynamodb.exceptions.InternalFailure`
- :exc:`sprockets_dynamodb.exceptions.LimitExceeded`
- :exc:`sprockets_dynamodb.exceptions.MissingParameter`
- :exc:`sprockets_dynamodb.exceptions.OptInRequired`
- :exc:`sprockets_dynamodb.exceptions.ResourceInUse`
- :exc:`sprockets_dynamodb.exceptions.RequestExpired`
- :exc:`sprockets_dynamodb.exceptions.ServiceUnavailable`
- :exc:`sprockets_dynamodb.exceptions.ValidationException`
Create an instance of this class to interact with a DynamoDB
server. A :class:`tornado_aws.client.AsyncAWSClient` instance
implements the AWS API wrapping and this class provides the
DynamoDB specifics.
"""
DEFAULT_MAX_RETRIES = 3
def __init__(self, **kwargs):
    """Configure the client from keyword arguments and environment
    variables (``DYNAMODB_ENDPOINT``, ``DYNAMODB_MAX_RETRIES``); see the
    class docstring for the supported keywords.

    """
    self.logger = LOGGER.getChild(self.__class__.__name__)
    if os.environ.get('DYNAMODB_ENDPOINT', None):
        kwargs.setdefault('endpoint', os.environ['DYNAMODB_ENDPOINT'])
    self._client = tornado_aws.AsyncAWSClient('dynamodb', **kwargs)
    self._ioloop = kwargs.get('io_loop', ioloop.IOLoop.current())
    # int(): the environment variable is always a string, but the retry
    # machinery needs an integer (``range()`` bound and ``deque``
    # maxlen in ``execute``).
    self._max_retries = int(kwargs.get(
        'max_retries', os.environ.get(
            'DYNAMODB_MAX_RETRIES', self.DEFAULT_MAX_RETRIES)))
    self._instrumentation_callback = kwargs.get('instrumentation_callback')
    self._on_error = kwargs.get('on_error_callback')
def create_table(self, table_definition):
    """Invoke the `CreateTable`_ function.

    :param dict table_definition: description of the table to create
        according to `CreateTable`_
    :rtype: tornado.concurrent.Future

    .. _CreateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_CreateTable.html

    """
    return self.execute('CreateTable', table_definition)
def update_table(self, table_definition):
    """Modify the provisioned throughput settings, global secondary
    indexes, or DynamoDB Streams settings for a given table.

    Only one of the following may be performed per call: modify the
    table's provisioned throughput, enable or disable Streams, remove a
    global secondary index, or create a new global secondary index.
    While the `UpdateTable`_ operation runs, the table status changes
    from ``ACTIVE`` to ``UPDATING``; another update may not be issued
    until it returns to ``ACTIVE``.

    Not implemented yet; calling this always raises
    :exc:`NotImplementedError`.

    :param dict table_definition: description of the table to update
        according to `UpdateTable`_
    :rtype: tornado.concurrent.Future

    .. _UpdateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateTable.html

    """
    raise NotImplementedError
def delete_table(self, table_name):
    """Invoke the `DeleteTable`_ function, removing the table and all of
    its items.

    After the request, the table stays in the ``DELETING`` state until
    DynamoDB completes the deletion. Deleting a table in the
    ``CREATING`` or ``UPDATING`` state raises
    :py:exc:`~sprockets_dynamodb.exceptions.ResourceInUse`; a missing
    table raises
    :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`. Deleting a
    table already in the ``DELETING`` state is not an error.

    :param str table_name: name of the table to delete.
    :rtype: tornado.concurrent.Future

    .. _DeleteTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteTable.html

    """
    return self.execute('DeleteTable', {'TableName': table_name})
def describe_table(self, table_name):
    """Invoke the `DescribeTable`_ function.

    :param str table_name: name of the table to describe.
    :rtype: tornado.concurrent.Future

    .. _DescribeTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DescribeTable.html

    """
    return self.execute('DescribeTable', {'TableName': table_name})
def list_tables(self, exclusive_start_table_name=None, limit=None):
    """Invoke the `ListTables`_ function.

    Returns an array of table names associated with the current account
    and endpoint. Output is paginated, with at most ``100`` names per
    page.

    :param str exclusive_start_table_name: first table name this call
        will evaluate; pass the ``LastEvaluatedTableName`` value from a
        previous call to fetch the next page
    :param int limit: maximum number of table names to return
        (defaults to ``100`` when omitted)
    :rtype: tornado.concurrent.Future

    .. _ListTables: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_ListTables.html

    """
    # Only truthy optional values are forwarded to the API.
    optional = {'ExclusiveStartTableName': exclusive_start_table_name,
                'Limit': limit}
    request = {key: value for key, value in optional.items() if value}
    return self.execute('ListTables', request)
def put_item(self, table_name, item,
             condition_expression=None,
             expression_attribute_names=None,
             expression_attribute_values=None,
             return_consumed_capacity=None,
             return_item_collection_metrics=None,
             return_values=None):
    """Invoke the `PutItem`_ function, creating a new item or replacing
    an existing item that has the same primary key.

    :param str table_name: the table to put the item to
    :param dict item: a map of attribute name/value pairs, one for each
        attribute. Only the primary key attributes are required. The
        values are native Python values and are marshalled for you.
    :param str condition_expression: a condition that must be satisfied
        in order for a conditional *PutItem* operation to succeed
    :param dict expression_attribute_names: one or more substitution
        tokens for attribute names in an expression
    :param dict expression_attribute_values: one or more values that can
        be substituted in an expression, as native Python values (they
        are marshalled, matching :meth:`update_item` and
        :meth:`delete_item`)
    :param str return_consumed_capacity: level of detail about
        provisioned throughput consumption returned in the response;
        one of ``INDEXES``, ``TOTAL`` or ``NONE``
    :param str return_item_collection_metrics: whether item collection
        metrics are returned (``SIZE`` or ``NONE``)
    :param str return_values: set to get the item attributes as they
        appeared before they were overwritten by the request
    :rtype: tornado.concurrent.Future

    .. _PutItem: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_PutItem.html

    """
    payload = {'TableName': table_name, 'Item': utils.marshall(item)}
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        # Marshall native values for consistency with update_item,
        # delete_item, query and scan, which all marshall here.
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if return_consumed_capacity:
        # Validate like the sibling methods instead of passing raw input.
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        # Previously any truthy value was silently replaced with 'SIZE';
        # validate and forward the caller's value like the siblings do.
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('PutItem', payload)
def get_item(self, table_name, key_dict,
             consistent_read=False,
             expression_attribute_names=None,
             projection_expression=None,
             return_consumed_capacity=None):
    """Invoke the `GetItem`_ function.

    :param str table_name: table to retrieve the item from
    :param dict key_dict: primary key of the item to fetch, as native
        Python values; it is marshalled for you
    :param bool consistent_read: when :data:`True` use a strongly
        consistent read instead of an eventually consistent one
    :param dict expression_attribute_names: one or more substitution
        tokens for attribute names in an expression
    :param str projection_expression: comma-separated attributes to
        retrieve; all attributes are returned when omitted, and
        requested attributes that are absent do not appear in the result
    :param str return_consumed_capacity: level of detail about
        provisioned throughput consumption returned in the response;
        one of ``INDEXES``, ``TOTAL`` or ``NONE``
    :rtype: tornado.concurrent.Future

    .. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_GetItem.html

    """
    request = {
        'TableName': table_name,
        'Key': utils.marshall(key_dict),
        'ConsistentRead': consistent_read,
    }
    if expression_attribute_names:
        request['ExpressionAttributeNames'] = expression_attribute_names
    if projection_expression:
        request['ProjectionExpression'] = projection_expression
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('GetItem', request)
def update_item(self, table_name, key_dict,
                condition_expression=None,
                update_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=None):
    """Invoke the `UpdateItem`_ function.

    Edits an existing item's attributes, or adds a new item to the table
    if it does not already exist. You can put, delete, or add attribute
    values, and you can perform a conditional update on an existing
    item.

    :param str table_name: the name of the table that contains the item
    :param dict key_dict: the full primary key of the item to update,
        as native Python values; it is marshalled for you
    :param str condition_expression: a condition that must be satisfied
        in order for a conditional *UpdateItem* operation to succeed
    :param str update_expression: an expression that defines one or more
        attributes to be updated, the action to be performed on them,
        and new value(s) for them
    :param dict expression_attribute_names: one or more substitution
        tokens for attribute names in an expression
    :param dict expression_attribute_values: one or more values that can
        be substituted in an expression (marshalled for you)
    :param str return_consumed_capacity: level of detail about
        provisioned throughput consumption returned in the response
    :param str return_item_collection_metrics: whether item collection
        metrics are returned
    :param str return_values: set to get the item attributes as they
        appeared either before or after the update
    :rtype: tornado.concurrent.Future

    .. _UpdateItem: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_UpdateItem.html

    """
    payload = {'TableName': table_name, 'Key': utils.marshall(key_dict)}
    # Only send UpdateExpression when one was supplied; the previous
    # implementation always included it, serializing ``null`` into the
    # request body when the caller omitted it.
    if update_expression:
        payload['UpdateExpression'] = update_expression
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('UpdateItem', payload)
def delete_item(self, table_name, key_dict,
                condition_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=False):
    """Invoke the `DeleteItem`_ function, removing a single item from a
    table by primary key, optionally as a conditional delete.

    :param str table_name: the table to delete the item from
    :param dict key_dict: the full primary key of the item to delete,
        as native Python values; it is marshalled for you
    :param str condition_expression: a condition that must be satisfied
        in order for a conditional *DeleteItem* to succeed
    :param dict expression_attribute_names: one or more substitution
        tokens for attribute names in an expression
    :param dict expression_attribute_values: one or more values that can
        be substituted in an expression (marshalled for you)
    :param str return_consumed_capacity: level of detail about
        provisioned throughput consumption returned in the response
    :param str return_item_collection_metrics: whether item collection
        metrics are returned
    :param str return_values: set to return the item attributes as they
        appeared before deletion
    :rtype: tornado.concurrent.Future

    .. _DeleteItem: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_DeleteItem.html

    """
    request = {'TableName': table_name, 'Key': utils.marshall(key_dict)}
    if condition_expression:
        request['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        request['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        request['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        request['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        request['ReturnValues'] = return_values
    return self.execute('DeleteItem', request)
def batch_get_item(self):
    """Invoke the `BatchGetItem`_ function.

    Not implemented by this client.

    .. _BatchGetItem: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_BatchGetItem.html

    """
    raise NotImplementedError
def batch_write_item(self):
    """Invoke the `BatchWriteItem`_ function.

    Not implemented by this client.

    .. _BatchWriteItem: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_BatchWriteItem.html

    """
    raise NotImplementedError
def query(self, table_name,
          index_name=None,
          consistent_read=None,
          key_condition_expression=None,
          filter_expression=None,
          expression_attribute_names=None,
          expression_attribute_values=None,
          projection_expression=None,
          select=None,
          exclusive_start_key=None,
          limit=None,
          scan_index_forward=True,
          return_consumed_capacity=None):
    """A `Query`_ operation uses the primary key of a table or a
    secondary index to directly access items from that table or index.

    :param str table_name: the table containing the requested items
    :param str index_name: a local or global secondary index to query
    :param bool consistent_read: when :data:`True` use strongly
        consistent reads (not supported on global secondary indexes,
        where it raises
        :exc:`~sprockets_dynamodb.exceptions.ValidationException`)
    :param str key_condition_expression: the condition specifying the
        key value(s) of items to retrieve; must perform an equality test
        on a single partition key value and may add one comparison on a
        single sort key value
    :param str filter_expression: conditions DynamoDB applies after the
        query but before the data is returned; filtering happens after
        the read and consumes no additional read capacity
    :param dict expression_attribute_names: one or more substitution
        tokens for attribute names in an expression
    :param dict expression_attribute_values: one or more values that can
        be substituted in an expression (marshalled for you)
    :param str projection_expression: attributes to retrieve, separated
        by commas
    :param str select: attributes to be returned in the result, one of
        ``ALL_ATTRIBUTES``, ``ALL_PROJECTED_ATTRIBUTES`` or ``COUNT``
    :param dict exclusive_start_key: primary key of the first item this
        operation will evaluate; use the ``LastEvaluatedKey`` returned
        by the previous operation (marshalled for you)
    :param int limit: maximum number of items to evaluate; when the
        limit or the 1 MB data-set cap is hit, matching values up to
        that point are returned along with a ``LastEvaluatedKey``
    :param bool scan_index_forward: traverse the index in ascending
        (:data:`True`, default) or descending order by sort key
    :param str return_consumed_capacity: level of detail about
        provisioned throughput consumption returned in the response;
        one of ``INDEXES``, ``TOTAL`` or ``NONE``
    :rtype: dict

    .. _Query: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_Query.html

    """
    request = {'TableName': table_name,
               'ScanIndexForward': scan_index_forward}
    if index_name:
        request['IndexName'] = index_name
    if consistent_read is not None:
        request['ConsistentRead'] = consistent_read
    # Optional pass-through expressions: forwarded verbatim when set.
    for field, value in (
            ('KeyConditionExpression', key_condition_expression),
            ('FilterExpression', filter_expression),
            ('ExpressionAttributeNames', expression_attribute_names)):
        if value:
            request[field] = value
    if expression_attribute_values:
        request['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if projection_expression:
        request['ProjectionExpression'] = projection_expression
    if select:
        _validate_select(select)
        request['Select'] = select
    if exclusive_start_key:
        request['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if limit:
        request['Limit'] = limit
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Query', request)
def scan(self,
         table_name,
         index_name=None,
         consistent_read=None,
         projection_expression=None,
         filter_expression=None,
         expression_attribute_names=None,
         expression_attribute_values=None,
         segment=None,
         total_segments=None,
         select=None,
         limit=None,
         exclusive_start_key=None,
         return_consumed_capacity=None):
    """The `Scan`_ operation returns one or more items and item
    attributes by accessing every item in a table or a secondary index.

    If the total number of scanned items exceeds the 1 MB data set size
    limit, the scan stops and results are returned along with a
    ``LastEvaluatedKey`` value to continue the scan in a subsequent
    operation. For faster performance on a large table or secondary
    index, applications can request a parallel scan by providing the
    ``segment`` and ``total_segments`` parameters. Scans use eventually
    consistent reads by default; set ``consistent_read`` to
    :data:`True` for a consistent snapshot as of the start of the scan.

    :param str table_name: the table to scan
    :param str index_name: a secondary index to scan instead
    :param bool consistent_read: use strongly consistent reads
    :param str projection_expression: attributes to retrieve, separated
        by commas
    :param str filter_expression: conditions applied after the scan but
        before results are returned
    :param dict expression_attribute_names: one or more substitution
        tokens for attribute names in an expression
    :param dict expression_attribute_values: one or more values that can
        be substituted in an expression (marshalled for you)
    :param int segment: zero-based segment of the table to scan in a
        parallel scan; requires ``total_segments``
    :param int total_segments: total number of segments in a parallel
        scan
    :param str select: attributes to be returned in the result
    :param int limit: maximum number of items to evaluate
    :param dict exclusive_start_key: primary key of the first item this
        operation will evaluate (marshalled for you)
    :param str return_consumed_capacity: level of detail about
        provisioned throughput consumption returned in the response
    :rtype: dict

    .. _Scan: http://docs.aws.amazon.com/amazondynamodb/
        latest/APIReference/API_Scan.html

    """
    payload = {'TableName': table_name}
    if index_name:
        payload['IndexName'] = index_name
    if consistent_read is not None:
        payload['ConsistentRead'] = consistent_read
    if filter_expression:
        payload['FilterExpression'] = filter_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if projection_expression:
        payload['ProjectionExpression'] = projection_expression
    # Segment is zero-based, so a plain truthiness test would silently
    # drop the valid first segment (0) of a parallel scan.
    if segment is not None:
        payload['Segment'] = segment
    if total_segments:
        payload['TotalSegments'] = total_segments
    if select:
        _validate_select(select)
        payload['Select'] = select
    if exclusive_start_key:
        payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if limit:
        payload['Limit'] = limit
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Scan', payload)
@gen.coroutine
def execute(self, action, parameters):
    """
    Execute a DynamoDB action with the given parameters. The method will
    retry requests that failed due to OS level errors or when being
    throttled by DynamoDB.

    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :rtype: tornado.concurrent.Future

    This method creates a future that will resolve to the result
    of calling the specified DynamoDB function. It does it's best
    to unwrap the response from the function to make life a little
    easier for you. It does this for the ``GetItem`` and ``Query``
    functions currently.

    :raises:
        :exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
        :exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
        :exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
        :exc:`~sprockets_dynamodb.exceptions.NoProfileError`
        :exc:`~sprockets_dynamodb.exceptions.TimeoutException`
        :exc:`~sprockets_dynamodb.exceptions.RequestException`
        :exc:`~sprockets_dynamodb.exceptions.InternalFailure`
        :exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
        :exc:`~sprockets_dynamodb.exceptions.MissingParameter`
        :exc:`~sprockets_dynamodb.exceptions.OptInRequired`
        :exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
        :exc:`~sprockets_dynamodb.exceptions.RequestExpired`
        :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
        :exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
        :exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
        :exc:`~sprockets_dynamodb.exceptions.ValidationException`

    """
    # Bounded deque: only the most recent _max_retries measurements are
    # kept for the instrumentation callback.
    measurements = collections.deque([], self._max_retries)
    for attempt in range(1, self._max_retries + 1):
        try:
            result = yield self._execute(
                action, parameters, attempt, measurements)
        except (exceptions.InternalServerError,
                exceptions.RequestException,
                exceptions.ThrottlingException,
                exceptions.ThroughputExceeded,
                exceptions.ServiceUnavailable) as error:
            # Transient errors: retry with exponential backoff. On the
            # final attempt, report measurements and hand the error to
            # _on_exception (which raises unless an error callback is
            # set).
            if attempt == self._max_retries:
                if self._instrumentation_callback:
                    self._instrumentation_callback(measurements)
                self._on_exception(error)
            duration = self._sleep_duration(attempt)
            self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
                                error, attempt, duration)
            yield gen.sleep(duration)
        except exceptions.DynamoDBException as error:
            # Non-retryable DynamoDB errors fail immediately.
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self._on_exception(error)
        else:
            # Success: report measurements and resolve the coroutine
            # with the unwrapped result (gen.Return is the pre-Python 3
            # way to return a value from a tornado coroutine).
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self.logger.debug('%s result: %r', action, result)
            raise gen.Return(_unwrap_result(action, result))
def set_error_callback(self, callback):
    """Assign the method invoked when an action execution hits an
    unrecoverable error (instead of raising the exception).

    :param method callback: the method to invoke with the error

    """
    self.logger.debug('Setting error callback: %r', callback)
    self._on_error = callback
def set_instrumentation_callback(self, callback):
    """Assign the method invoked with the collected measurements once a
    request has finished.

    :param method callback: the method to invoke with the measurements

    """
    self.logger.debug('Setting instrumentation callback: %r', callback)
    self._instrumentation_callback = callback
def _execute(self, action, parameters, attempt, measurements):
    """Issue a single DynamoDB request for ``action``.

    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :param int attempt: which attempt number this is
    :param list measurements: a list for accumulating request
        measurements
    :rtype: tornado.concurrent.Future

    """
    result_future = concurrent.Future()
    started_at = time.time()

    def on_fetch_complete(fetch_future):
        """Forward the completed fetch to _on_response, which resolves
        ``result_future``.

        :param tornado.concurrent.Future fetch_future: the HTTP request
            future

        """
        self._on_response(
            action, parameters.get('TableName', 'Unknown'), attempt,
            started_at, fetch_future, result_future, measurements)

    fetch_future = self._client.fetch(
        'POST', '/',
        body=json.dumps(parameters).encode('utf-8'),
        headers={
            'x-amz-target': 'DynamoDB_20120810.{}'.format(action),
            'Content-Type': 'application/x-amz-json-1.0',
        })
    ioloop.IOLoop.current().add_future(fetch_future, on_fetch_complete)
    return result_future
def _on_exception(self, error):
    """Handle exceptions that can not be retried: dispatch to the
    configured error callback, or re-raise when none is set.

    :param error: the exception that was raised
    :type error: sprockets_dynamodb.exceptions.DynamoDBException

    """
    callback = self._on_error
    if callback:
        callback(error)
    else:
        raise error
@staticmethod
def _process_response(response):
    """Turn a raw AWS response future into a deserialized payload,
    raising the mapped exception type when the request failed.

    :param tornado.concurrent.Future response: the request future
    :rtype: dict or list
    :raises: sprockets_dynamodb.exceptions.DynamoDBException

    """
    raised = response.exception()
    if raised:
        # Translate known AWS error types to this package's exception
        # hierarchy; anything unmapped propagates unchanged.
        if isinstance(raised, aws_exceptions.AWSError):
            error_info = raised.args[1]
            if error_info['type'] in exceptions.MAP:
                raise exceptions.MAP[error_info['type']](
                    error_info['message'])
        raise raised
    http_response = response.result()
    if not http_response or not http_response.body:
        raise exceptions.DynamoDBException('empty response')
    return json.loads(http_response.body.decode('utf-8'))
@staticmethod
def _sleep_duration(attempt):
    """Return how long to sleep, in seconds, before the next retry.

    Exponential backoff: 2 ** attempt, scaled from milliseconds.

    :param int attempt: the attempt number
    :rtype: float

    """
    backoff_ms = float(2 ** attempt) * 100
    return backoff_ms / 1000
|
sprockets/sprockets-dynamodb
|
sprockets_dynamodb/client.py
|
Client._process_response
|
python
|
def _process_response(response):
error = response.exception()
if error:
if isinstance(error, aws_exceptions.AWSError):
if error.args[1]['type'] in exceptions.MAP:
raise exceptions.MAP[error.args[1]['type']](
error.args[1]['message'])
raise error
http_response = response.result()
if not http_response or not http_response.body:
raise exceptions.DynamoDBException('empty response')
return json.loads(http_response.body.decode('utf-8'))
|
Process the raw AWS response, returning either the mapped exception
or deserialized response.
:param tornado.concurrent.Future response: The request future
:rtype: dict or list
:raises: sprockets_dynamodb.exceptions.DynamoDBException
|
train
|
https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/client.py#L910-L929
| null |
class Client(object):
"""
Asynchronous DynamoDB Client
:keyword str region: AWS region to send requests to
:keyword str access_key: AWS access key. If unspecified, this
defaults to the :envvar:`AWS_ACCESS_KEY_ID` environment
variable and will fall back to using the AWS CLI credentials
file. See :class:`tornado_aws.client.AsyncAWSClient` for
more details.
:keyword str secret_key: AWS secret used to secure API calls.
If unspecified, this defaults to the :envvar:`AWS_SECRET_ACCESS_KEY`
environment variable and will fall back to using the AWS CLI
credentials as described in :class:`tornado_aws.client.AsyncAWSClient`.
:keyword str profile: optional profile to use in AWS API calls.
If unspecified, this defaults to the :envvar:`AWS_DEFAULT_PROFILE`
environment variable or ``default`` if unset.
:keyword str endpoint: DynamoDB endpoint to contact. If unspecified,
the default is determined by the region.
:keyword int max_clients: optional maximum number of HTTP requests
that may be performed in parallel.
:keyword int max_retries: Maximum number of times to retry a request when
if fails under certain conditions. Can also be set with the
:envvar:`DYNAMODB_MAX_RETRIES` environment variable.
:keyword method instrumentation_callback: A method that is invoked with a
list of measurements that were collected during the execution of an
individual action.
:keyword method on_error_callback: A method that is invoked when there is
a request exception that can not automatically be retried or the
maximum number of retries has been exceeded for a request.
Any of the methods invoked in the client can raise the following
exceptions:
- :exc:`sprockets_dynamodb.exceptions.DynamoDBException`
- :exc:`sprockets_dynamodb.exceptions.ConfigNotFound`
- :exc:`sprockets_dynamodb.exceptions.NoCredentialsError`
- :exc:`sprockets_dynamodb.exceptions.NoProfileError`
- :exc:`sprockets_dynamodb.exceptions.TimeoutException`
- :exc:`sprockets_dynamodb.exceptions.RequestException`
- :exc:`sprockets_dynamodb.exceptions.InternalFailure`
- :exc:`sprockets_dynamodb.exceptions.LimitExceeded`
- :exc:`sprockets_dynamodb.exceptions.MissingParameter`
- :exc:`sprockets_dynamodb.exceptions.OptInRequired`
- :exc:`sprockets_dynamodb.exceptions.ResourceInUse`
- :exc:`sprockets_dynamodb.exceptions.RequestExpired`
- :exc:`sprockets_dynamodb.exceptions.ServiceUnavailable`
- :exc:`sprockets_dynamodb.exceptions.ValidationException`
Create an instance of this class to interact with a DynamoDB
server. A :class:`tornado_aws.client.AsyncAWSClient` instance
implements the AWS API wrapping and this class provides the
DynamoDB specifics.
"""
DEFAULT_MAX_RETRIES = 3
def __init__(self, **kwargs):
    """Create the client, wiring up the underlying
    :class:`tornado_aws.AsyncAWSClient` and retry/instrumentation
    configuration.  See the class docstring for supported keyword
    arguments.

    """
    self.logger = LOGGER.getChild(self.__class__.__name__)
    # Allow the endpoint to be supplied through the environment without
    # overriding an explicit keyword argument.
    if os.environ.get('DYNAMODB_ENDPOINT', None):
        kwargs.setdefault('endpoint', os.environ['DYNAMODB_ENDPOINT'])
    self._client = tornado_aws.AsyncAWSClient('dynamodb', **kwargs)
    self._ioloop = kwargs.get('io_loop', ioloop.IOLoop.current())
    # Environment variable values are always strings; coerce to int so the
    # retry arithmetic in execute() (range / equality against the attempt
    # counter) works when DYNAMODB_MAX_RETRIES is set.
    self._max_retries = int(
        kwargs.get(
            'max_retries', os.environ.get(
                'DYNAMODB_MAX_RETRIES', self.DEFAULT_MAX_RETRIES)))
    self._instrumentation_callback = kwargs.get('instrumentation_callback')
    self._on_error = kwargs.get('on_error_callback')
def create_table(self, table_definition):
    """Create a new DynamoDB table via the `CreateTable`_ API action.

    :param dict table_definition: the table specification to submit,
        structured as described in the `CreateTable`_ API reference
    :rtype: tornado.concurrent.Future

    .. _CreateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_CreateTable.html

    """
    return self.execute('CreateTable', table_definition)
def update_table(self, table_definition):
    """Change provisioned throughput, global secondary indexes, or
    Streams settings for an existing table (`UpdateTable`_).

    Only one of the following may be performed per call: modifying
    provisioned throughput, toggling Streams, removing a global
    secondary index, or creating a new global secondary index.
    *UpdateTable* is asynchronous on the server side; the table moves
    from ``ACTIVE`` to ``UPDATING`` and back to ``ACTIVE`` when the
    operation completes, during which no further *UpdateTable* request
    may be issued.

    :param dict table_definition: the table changes to submit, as
        described by the `UpdateTable`_ API reference
    :rtype: tornado.concurrent.Future
    :raises NotImplementedError: this client does not implement the
        action yet

    .. _UpdateTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateTable.html

    """
    raise NotImplementedError
def delete_table(self, table_name):
    """Delete a table and all of its items via `DeleteTable`_.

    After the request, the table stays in the ``DELETING`` state until
    DynamoDB finishes removing it.  Deleting an ``ACTIVE`` table
    succeeds; a table in ``CREATING`` or ``UPDATING`` raises
    :exc:`~sprockets_dynamodb.exceptions.ResourceInUse`, a missing table
    raises :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`, and a
    table already ``DELETING`` returns without error.

    :param str table_name: name of the table to delete
    :rtype: tornado.concurrent.Future

    .. _DeleteTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteTable.html

    """
    return self.execute('DeleteTable', {'TableName': table_name})
def describe_table(self, table_name):
    """Fetch table metadata via the `DescribeTable`_ API action.

    :param str table_name: name of the table to describe
    :rtype: tornado.concurrent.Future

    .. _DescribeTable: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DescribeTable.html

    """
    return self.execute('DescribeTable', {'TableName': table_name})
def list_tables(self, exclusive_start_table_name=None, limit=None):
    """Return table names for the current account via `ListTables`_.

    The response is paginated; each page holds at most 100 names.

    :param str exclusive_start_table_name: first table name this
        operation will evaluate — pass the ``LastEvaluatedTableName``
        value from a previous call to fetch the next page
    :param int limit: maximum number of names to return (default 100)
    :rtype: tornado.concurrent.Future

    .. _ListTables: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_ListTables.html

    """
    request = {}
    if exclusive_start_table_name:
        request['ExclusiveStartTableName'] = exclusive_start_table_name
    if limit:
        request['Limit'] = limit
    return self.execute('ListTables', request)
def put_item(self, table_name, item,
             condition_expression=None,
             expression_attribute_names=None,
             expression_attribute_values=None,
             return_consumed_capacity=None,
             return_item_collection_metrics=None,
             return_values=None):
    """Create a new item or fully replace an existing one (`PutItem`_).

    If an item with the same primary key already exists in the table it
    is completely replaced by ``item``.  Supply ``condition_expression``
    to perform a conditional put.

    :param str table_name: the table to write the item to
    :param dict item: attribute name/value pairs for the item; all
        primary-key attributes are required (partition key, plus sort
        key for composite keys)
    :param str condition_expression: condition that must be satisfied
        for the put to succeed
    :param dict expression_attribute_names: substitution tokens for
        attribute names used in expressions
    :param dict expression_attribute_values: values that can be
        substituted in expressions.  NOTE(review): unlike
        ``update_item``/``delete_item`` this value is passed through
        without ``utils.marshall`` — confirm whether callers are
        expected to pre-marshall here.
    :param str return_consumed_capacity: level of consumed-capacity
        detail to return: ``INDEXES``, ``TOTAL`` or ``NONE``
    :param str return_item_collection_metrics: ``SIZE`` to return item
        collection metrics, ``NONE`` to omit them
    :param str return_values: use if you want the item attributes as
        they appeared before the put
    :rtype: tornado.concurrent.Future

    .. _PutItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_PutItem.html

    """
    payload = {'TableName': table_name, 'Item': utils.marshall(item)}
    if condition_expression:
        payload['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = expression_attribute_values
    if return_consumed_capacity:
        # Validate like the other item actions (get_item, update_item,
        # delete_item) instead of forwarding the value unchecked.
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        # Previously hard-coded to 'SIZE', which silently sent the wrong
        # value when a caller passed 'NONE'.  Validate and forward the
        # caller's value, matching update_item / delete_item.
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        payload['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('PutItem', payload)
def get_item(self, table_name, key_dict,
             consistent_read=False,
             expression_attribute_names=None,
             projection_expression=None,
             return_consumed_capacity=None):
    """Retrieve a single item by primary key via `GetItem`_.

    :param str table_name: table to retrieve the item from
    :param dict key_dict: primary key of the item; marshalled for you,
        so a native :class:`dict` works
    :param bool consistent_read: use a strongly consistent read when
        :data:`True`; otherwise the read is eventually consistent
    :param dict expression_attribute_names: substitution tokens for
        attribute names used in expressions
    :param str projection_expression: comma-separated attributes to
        retrieve; all attributes are returned when omitted, and
        requested attributes that do not exist are absent from the
        result
    :param str return_consumed_capacity: level of consumed-capacity
        detail to return: ``INDEXES``, ``TOTAL`` or ``NONE``
    :rtype: tornado.concurrent.Future

    .. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_GetItem.html

    """
    request = {'TableName': table_name,
               'Key': utils.marshall(key_dict),
               'ConsistentRead': consistent_read}
    for field, value in (
            ('ExpressionAttributeNames', expression_attribute_names),
            ('ProjectionExpression', projection_expression)):
        if value:
            request[field] = value
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('GetItem', request)
def update_item(self, table_name, key_dict,
                condition_expression=None,
                update_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=None):
    """Edit an existing item's attributes, or add a new item if none
    exists, via `UpdateItem`_.

    Supports put/delete/add of attribute values and conditional updates
    (insert a name-value pair if absent, or replace one that matches an
    expected value).

    :param str table_name: the table containing the item to update
    :param dict key_dict: primary key of the item; all key attributes
        are required (partition key, plus sort key for composite keys)
    :param str condition_expression: condition that must be satisfied
        for the update to succeed
    :param str update_expression: expression defining the attributes to
        update, the action to perform, and the new value(s)
    :param dict expression_attribute_names: substitution tokens for
        attribute names used in expressions
    :param dict expression_attribute_values: values that can be
        substituted in expressions; marshalled for you
    :param str return_consumed_capacity: level of consumed-capacity
        detail to return: ``INDEXES``, ``TOTAL`` or ``NONE``
    :param str return_item_collection_metrics: ``SIZE`` to return item
        collection metrics, ``NONE`` to omit them
    :param str return_values: use if you want the item attributes as
        they appeared before or after the update
    :rtype: tornado.concurrent.Future

    .. _UpdateItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_UpdateItem.html

    """
    request = {'TableName': table_name,
               'Key': utils.marshall(key_dict),
               'UpdateExpression': update_expression}
    if condition_expression:
        request['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        request['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        request['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        request['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        request['ReturnValues'] = return_values
    return self.execute('UpdateItem', request)
def delete_item(self, table_name, key_dict,
                condition_expression=None,
                expression_attribute_names=None,
                expression_attribute_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None,
                return_values=False):
    """Delete a single item by primary key via `DeleteItem`_.

    A conditional delete (only delete when the item exists or matches an
    expected attribute value) can be expressed through
    ``condition_expression``.

    :param str table_name: the table to delete the item from
    :param dict key_dict: primary key of the item; all key attributes
        are required (partition key, plus sort key for composite keys)
    :param str condition_expression: condition that must be satisfied
        for the delete to succeed
    :param dict expression_attribute_names: substitution tokens for
        attribute names used in expressions
    :param dict expression_attribute_values: values that can be
        substituted in expressions; marshalled for you
    :param str return_consumed_capacity: level of consumed-capacity
        detail to return: ``INDEXES``, ``TOTAL`` or ``NONE``
    :param str return_item_collection_metrics: ``SIZE`` to return item
        collection metrics, ``NONE`` to omit them
    :param str return_values: return the item attributes as they
        appeared before deletion

    .. _DeleteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_DeleteItem.html

    """
    request = {'TableName': table_name, 'Key': utils.marshall(key_dict)}
    if condition_expression:
        request['ConditionExpression'] = condition_expression
    if expression_attribute_names:
        request['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        request['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    if return_item_collection_metrics:
        _validate_return_item_collection_metrics(
            return_item_collection_metrics)
        request['ReturnItemCollectionMetrics'] = \
            return_item_collection_metrics
    if return_values:
        _validate_return_values(return_values)
        request['ReturnValues'] = return_values
    return self.execute('DeleteItem', request)
def batch_get_item(self):
    """Invoke the `BatchGetItem`_ API action.

    :raises NotImplementedError: this client does not implement the
        action yet

    .. _BatchGetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchGetItem.html

    """
    raise NotImplementedError
def batch_write_item(self):
    """Invoke the `BatchWriteItem`_ API action.

    :raises NotImplementedError: this client does not implement the
        action yet

    .. _BatchWriteItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_BatchWriteItem.html

    """
    raise NotImplementedError
def query(self, table_name,
          index_name=None,
          consistent_read=None,
          key_condition_expression=None,
          filter_expression=None,
          expression_attribute_names=None,
          expression_attribute_values=None,
          projection_expression=None,
          select=None,
          exclusive_start_key=None,
          limit=None,
          scan_index_forward=True,
          return_consumed_capacity=None):
    """Access items directly by primary key on a table or secondary
    index via the `Query`_ API action.

    :param str table_name: the table containing the requested items
    :param str index_name: secondary index (local or global) to query
    :param bool consistent_read: use strongly consistent reads when
        ``True``; note that global secondary indexes do not support
        consistent reads and will raise
        :exc:`~sprockets_dynamodb.exceptions.ValidationException`
    :param str key_condition_expression: key condition for the items to
        retrieve; must test equality on a single partition key value and
        may optionally compare against a single sort key value
    :param str filter_expression: conditions applied server-side after
        the query but before results are returned; filtering consumes no
        additional read capacity
    :param dict expression_attribute_names: substitution tokens for
        attribute names used in expressions
    :param dict expression_attribute_values: values that can be
        substituted in expressions; marshalled for you
    :param str projection_expression: comma-separated attributes to
        retrieve
    :param str select: attributes to return: ``ALL_ATTRIBUTES``,
        ``ALL_PROJECTED_ATTRIBUTES`` (index queries only) or ``COUNT``
    :param dict exclusive_start_key: primary key of the first item to
        evaluate; pass the previous response's ``LastEvaluatedKey`` to
        continue a paginated query
    :param int limit: maximum number of items to evaluate; when reached
        (or when the 1 MB data-set limit is hit) the response includes
        ``LastEvaluatedKey`` for continuation
    :param bool scan_index_forward: traverse the index in ascending
        order when ``True`` (default), descending when ``False``
    :param str return_consumed_capacity: level of consumed-capacity
        detail to return: ``INDEXES``, ``TOTAL`` or ``NONE``
    :rtype: dict

    .. _Query: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_Query.html

    """
    request = {'TableName': table_name,
               'ScanIndexForward': scan_index_forward}
    # Optional parameters that are copied through verbatim when truthy.
    passthrough = (
        ('IndexName', index_name),
        ('KeyConditionExpression', key_condition_expression),
        ('FilterExpression', filter_expression),
        ('ExpressionAttributeNames', expression_attribute_names),
        ('ProjectionExpression', projection_expression),
        ('Limit', limit))
    for field, value in passthrough:
        if value:
            request[field] = value
    # ``False`` is a meaningful value for consistent_read, so test
    # against None explicitly.
    if consistent_read is not None:
        request['ConsistentRead'] = consistent_read
    if expression_attribute_values:
        request['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if select:
        _validate_select(select)
        request['Select'] = select
    if exclusive_start_key:
        request['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Query', request)
def scan(self,
         table_name,
         index_name=None,
         consistent_read=None,
         projection_expression=None,
         filter_expression=None,
         expression_attribute_names=None,
         expression_attribute_values=None,
         segment=None,
         total_segments=None,
         select=None,
         limit=None,
         exclusive_start_key=None,
         return_consumed_capacity=None):
    """Return items by scanning every item in a table or secondary
    index via the `Scan`_ API action.

    If the scanned data exceeds the 1 MB data-set limit, the scan stops
    and the response includes a ``LastEvaluatedKey`` to continue in a
    subsequent call.  For faster scans of large tables, request a
    parallel scan by supplying ``segment`` and ``total_segments``.
    Scans use eventually consistent reads unless ``consistent_read`` is
    set to ``True``.

    :param str table_name: the table to scan
    :param str index_name: secondary index to scan instead of the table
    :param bool consistent_read: use strongly consistent reads when
        ``True``
    :param str projection_expression: comma-separated attributes to
        retrieve
    :param str filter_expression: conditions applied server-side after
        the scan but before results are returned
    :param dict expression_attribute_names: substitution tokens for
        attribute names used in expressions
    :param dict expression_attribute_values: values that can be
        substituted in expressions; marshalled for you
    :param int segment: for a parallel scan, the zero-based segment this
        worker should scan
    :param int total_segments: for a parallel scan, the total number of
        segments the scan is divided into
    :param str select: attributes to return: ``ALL_ATTRIBUTES``,
        ``ALL_PROJECTED_ATTRIBUTES`` or ``COUNT``
    :param int limit: maximum number of items to evaluate
    :param dict exclusive_start_key: primary key of the first item to
        evaluate; pass the previous response's ``LastEvaluatedKey``
    :param str return_consumed_capacity: level of consumed-capacity
        detail to return: ``INDEXES``, ``TOTAL`` or ``NONE``
    :rtype: dict

    .. _Scan: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_Scan.html

    """
    payload = {'TableName': table_name}
    if index_name:
        payload['IndexName'] = index_name
    if consistent_read is not None:
        payload['ConsistentRead'] = consistent_read
    if filter_expression:
        payload['FilterExpression'] = filter_expression
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if expression_attribute_values:
        payload['ExpressionAttributeValues'] = \
            utils.marshall(expression_attribute_values)
    if projection_expression:
        payload['ProjectionExpression'] = projection_expression
    # Segment is zero-based in the Scan API, so ``segment=0`` is a valid
    # parallel-scan worker index; a plain truthiness test dropped it.
    if segment is not None:
        payload['Segment'] = segment
    if total_segments:
        payload['TotalSegments'] = total_segments
    if select:
        _validate_select(select)
        payload['Select'] = select
    if exclusive_start_key:
        payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
    if limit:
        payload['Limit'] = limit
    if return_consumed_capacity:
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('Scan', payload)
@gen.coroutine
def execute(self, action, parameters):
    """Execute a DynamoDB action with the given parameters, retrying
    requests that fail due to OS-level errors or DynamoDB throttling.

    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :rtype: tornado.concurrent.Future

    The returned future resolves to the result of the DynamoDB call,
    unwrapped (via ``_unwrap_result``) to make ``GetItem``/``Query``
    style responses easier to consume.

    :raises:
        :exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
        :exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
        :exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
        :exc:`~sprockets_dynamodb.exceptions.NoProfileError`
        :exc:`~sprockets_dynamodb.exceptions.TimeoutException`
        :exc:`~sprockets_dynamodb.exceptions.RequestException`
        :exc:`~sprockets_dynamodb.exceptions.InternalFailure`
        :exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
        :exc:`~sprockets_dynamodb.exceptions.MissingParameter`
        :exc:`~sprockets_dynamodb.exceptions.OptInRequired`
        :exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
        :exc:`~sprockets_dynamodb.exceptions.RequestExpired`
        :exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
        :exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
        :exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
        :exc:`~sprockets_dynamodb.exceptions.ValidationException`

    """
    # Bounded to the retry budget so one measurement per attempt fits
    # without evicting earlier attempts within a single call.
    measurements = collections.deque([], self._max_retries)
    for attempt in range(1, self._max_retries + 1):
        try:
            result = yield self._execute(
                action, parameters, attempt, measurements)
        except (exceptions.InternalServerError,
                exceptions.RequestException,
                exceptions.ThrottlingException,
                exceptions.ThroughputExceeded,
                exceptions.ServiceUnavailable) as error:
            # Transient/retryable failures: report and surface the error
            # once the attempt budget is exhausted, otherwise back off
            # and try again.
            if attempt == self._max_retries:
                if self._instrumentation_callback:
                    self._instrumentation_callback(measurements)
                self._on_exception(error)
            # NOTE(review): when _on_exception delegates to an error
            # callback that does not raise, control falls through here
            # and sleeps once more even on the final attempt, then the
            # loop exits without a result — confirm this is intended.
            duration = self._sleep_duration(attempt)
            self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
                                error, attempt, duration)
            yield gen.sleep(duration)
        except exceptions.DynamoDBException as error:
            # Non-retryable DynamoDB errors are reported immediately.
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self._on_exception(error)
        else:
            # Success: flush measurements and resolve the coroutine.
            if self._instrumentation_callback:
                self._instrumentation_callback(measurements)
            self.logger.debug('%s result: %r', action, result)
            # gen.Return is the pre-Python-3 Tornado way to return a
            # value from a @gen.coroutine.
            raise gen.Return(_unwrap_result(action, result))
def set_error_callback(self, callback):
    """Register the method invoked when an action execution hits an
    unrecoverable error.

    :param method callback: the method to invoke

    """
    self.logger.debug('Setting error callback: %r', callback)
    self._on_error = callback
def set_instrumentation_callback(self, callback):
    """Register the method invoked with the measurements gathered for a
    completed request.

    :param method callback: the method to invoke

    """
    self.logger.debug('Setting instrumentation callback: %r', callback)
    self._instrumentation_callback = callback
def _execute(self, action, parameters, attempt, measurements):
    """Issue a single HTTP request for a DynamoDB action.

    :param str action: DynamoDB action to invoke
    :param dict parameters: parameters to send into the action
    :param int attempt: Which attempt number this is
    :param list measurements: A list for accumulating request measurements
    :rtype: tornado.concurrent.Future

    """
    # The returned future is resolved (result or exception) later by
    # _on_response once the HTTP round-trip has finished.
    future = concurrent.Future()
    start = time.time()

    def handle_response(request):
        """Invoked by the IOLoop when fetch has a response to process.

        :param tornado.concurrent.Future request: The request future

        """
        self._on_response(
            action, parameters.get('TableName', 'Unknown'), attempt,
            start, request, future, measurements)

    # NOTE(review): uses IOLoop.current() rather than the self._ioloop
    # captured in __init__ — confirm whether a caller-supplied io_loop
    # should be honored here instead.
    ioloop.IOLoop.current().add_future(self._client.fetch(
        'POST', '/',
        body=json.dumps(parameters).encode('utf-8'),
        headers={
            # DynamoDB routes the request by this target header, not by
            # the URL path.
            'x-amz-target': 'DynamoDB_20120810.{}'.format(action),
            'Content-Type': 'application/x-amz-json-1.0',
        }), handle_response)
    return future
def _on_exception(self, error):
"""Handle exceptions that can not be retried.
:param error: The exception that was raised
:type error: sprockets_dynamodb.exceptions.DynamoDBException
"""
if not self._on_error:
raise error
self._on_error(error)
def _on_response(self, action, table, attempt, start, response, future,
                 measurements):
    """Invoked when the HTTP request to the DynamoDB has returned and
    is responsible for setting the future result or exception based upon
    the HTTP response provided.

    :param str action: The action that was taken
    :param str table: The table name the action was made against
    :param int attempt: The attempt number for the action
    :param float start: When the request was submitted
    :param tornado.concurrent.Future response: The HTTP request future
    :param tornado.concurrent.Future future: The action execution future
    :param list measurements: The measurement accumulator

    """
    self.logger.debug('%s on %s request #%i = %r',
                      action, table, attempt, response)
    now, exception = time.time(), None
    try:
        future.set_result(self._process_response(response))
    # Map low-level AWS client errors onto this package's exception
    # hierarchy so callers only ever deal with one set of types.
    except aws_exceptions.ConfigNotFound as error:
        exception = exceptions.ConfigNotFound(str(error))
    except aws_exceptions.ConfigParserError as error:
        exception = exceptions.ConfigParserError(str(error))
    except aws_exceptions.NoCredentialsError as error:
        exception = exceptions.NoCredentialsError(str(error))
    except aws_exceptions.NoProfileError as error:
        exception = exceptions.NoProfileError(str(error))
    except aws_exceptions.AWSError as error:
        exception = exceptions.DynamoDBException(error)
    # Transport-level failures all collapse into RequestException.
    except (ConnectionError, ConnectionResetError, OSError,
            aws_exceptions.RequestException, ssl.SSLError,
            _select.error, ssl.socket_error, socket.gaierror) as error:
        exception = exceptions.RequestException(str(error))
    except TimeoutError:
        exception = exceptions.TimeoutException()
    except httpclient.HTTPError as error:
        # Tornado reports a client-side timeout as HTTP status 599.
        if error.code == 599:
            exception = exceptions.TimeoutException()
        else:
            exception = exceptions.RequestException(
                getattr(getattr(error, 'response', error),
                        'body', str(error.code)))
    except Exception as error:
        exception = error
    if exception:
        future.set_exception(exception)
    # A measurement is recorded for every attempt, success or failure.
    # max(now, start) clamps the duration at zero in case the clock
    # moved backwards between the two time.time() calls.
    measurements.append(
        Measurement(now, action, table, attempt, max(now, start) - start,
                    exception.__class__.__name__
                    if exception else exception))
# Fix: the @staticmethod decorator was applied twice; on Python < 3.10
# staticmethod(staticmethod(f)) yields a non-callable attribute.
@staticmethod
def _sleep_duration(attempt):
    """Calculates how long to sleep between exceptions. Returns a value
    in seconds.

    Exponential backoff: 2 ** attempt * 100 milliseconds.

    :param int attempt: The attempt number
    :rtype: float

    """
    return (float(2 ** attempt) * 100) / 1000
|
Danielhiversen/pymill
|
mill/__init__.py
|
set_heater_values
|
python
|
async def set_heater_values(heater_data, heater):
    """Set heater values from heater data.

    Copies device state from a raw Mill API response dict onto the
    given Heater instance in place.

    :param dict heater_data: raw device dict from the Mill API
    :param heater: the Heater instance to update
    """
    heater.current_temp = heater_data.get('currentTemp')
    heater.device_status = heater_data.get('deviceStatus')
    heater.available = heater.device_status == 0
    heater.name = heater_data.get('deviceName')
    heater.fan_status = heater_data.get('fanStatus')
    heater.is_holiday = heater_data.get('isHoliday')
    # Room assigned devices don't report canChangeTemp
    # in selectDevice response.
    if heater.room is None:
        heater.can_change_temp = heater_data.get('canChangeTemp')
    # Independent devices report their target temperature via
    # holidayTemp value. But isHoliday is still set to 0.
    # Room assigned devices may have set "Control Device individually"
    # which effectively set their isHoliday value to 1.
    # In this mode they behave similar to independent devices
    # reporting their target temperature also via holidayTemp.
    if heater.independent_device or heater.is_holiday == 1:
        heater.set_temp = heater_data.get('holidayTemp')
    elif heater.room is not None:
        if heater.room.current_mode == 1:
            heater.set_temp = heater.room.comfort_temp
        elif heater.room.current_mode == 2:
            heater.set_temp = heater.room.sleep_temp
        elif heater.room.current_mode == 3:
            heater.set_temp = heater.room.away_temp
    heater.power_status = heater_data.get('powerStatus')
    heater.tibber_control = heater_data.get('tibberControl')
    heater.open_window = heater_data.get('open_window',
                                         heater_data.get('open'))
    heater.is_heating = heater_data.get('heatStatus',
                                        heater_data.get('heaterFlag'))
    try:
        heater.sub_domain = int(float(heater_data.get('subDomain',
                                                      heater_data.get('subDomainId',
                                                                      heater.sub_domain))))
    # Fix: also catch TypeError — float(None) raises TypeError, not
    # ValueError, when both subDomain keys are missing and the heater's
    # existing sub_domain is None. Keep the previous value in that case.
    except (TypeError, ValueError):
        pass
|
Set heater values from heater data
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L445-L488
| null |
"""Library to handle connection with mill."""
# Based on https://pastebin.com/53Nk0wJA and Postman capturing from the app
# All requests are send unencrypted from the app :(
import asyncio
import datetime as dt
import hashlib
import json
import logging
import random
import string
import time
import aiohttp
import async_timeout
API_ENDPOINT_1 = 'https://eurouter.ablecloud.cn:9005/zc-account/v1/'
API_ENDPOINT_2 = 'https://eurouter.ablecloud.cn:9005/millService/v1/'
DEFAULT_TIMEOUT = 10
MIN_TIME_BETWEEN_UPDATES = dt.timedelta(seconds=2)
REQUEST_TIMEOUT = '300'
_LOGGER = logging.getLogger(__name__)
class Mill:
    """Class to communicate with the Mill api."""
    # pylint: disable=too-many-instance-attributes, too-many-public-methods

    def __init__(self, username, password,
                 timeout=DEFAULT_TIMEOUT,
                 websession=None):
        """Initialize the Mill connection."""
        if websession is None:
            async def _create_session():
                return aiohttp.ClientSession()
            loop = asyncio.get_event_loop()
            self.websession = loop.run_until_complete(_create_session())
        else:
            self.websession = websession
        self._timeout = timeout
        self._username = username
        self._password = password
        self._user_id = None
        self._token = None
        self.rooms = {}
        self.heaters = {}
        self._throttle_time = None
        self._throttle_all_time = None

    async def connect(self, retry=2):
        """Connect to Mill.

        Logs in and stores the auth token / user id used to sign every
        subsequent request. Returns True on success, False otherwise.
        """
        # pylint: disable=too-many-return-statements
        url = API_ENDPOINT_1 + 'login'
        headers = {
            "Content-Type": "application/x-zc-object",
            "Connection": "Keep-Alive",
            "X-Zc-Major-Domain": "seanywell",
            "X-Zc-Msg-Name": "millService",
            "X-Zc-Sub-Domain": "milltype",
            "X-Zc-Seq-Id": "1",
            "X-Zc-Version": "1",
        }
        payload = {"account": self._username,
                   "password": self._password}
        try:
            with async_timeout.timeout(self._timeout):
                resp = await self.websession.post(url,
                                                  data=json.dumps(payload),
                                                  headers=headers)
        except (asyncio.TimeoutError, aiohttp.ClientError):
            if retry < 1:
                _LOGGER.error("Error connecting to Mill", exc_info=True)
                return False
            return await self.connect(retry - 1)
        result = await resp.text()
        if '"errorCode":3504' in result:
            _LOGGER.error('Wrong password')
            return False
        if '"errorCode":3501' in result:
            _LOGGER.error('Account does not exist')
            return False
        data = json.loads(result)
        token = data.get('token')
        if token is None:
            _LOGGER.error('No token')
            return False
        user_id = data.get('userId')
        if user_id is None:
            _LOGGER.error('No user id')
            return False
        self._token = token
        self._user_id = user_id
        return True

    def sync_connect(self):
        """Connect to Mill (blocking wrapper around connect)."""
        # Fix: docstring previously said "Close the Mill connection."
        loop = asyncio.get_event_loop()
        task = loop.create_task(self.connect())
        loop.run_until_complete(task)

    async def close_connection(self):
        """Close the Mill connection."""
        await self.websession.close()

    def sync_close_connection(self):
        """Close the Mill connection (blocking wrapper)."""
        loop = asyncio.get_event_loop()
        task = loop.create_task(self.close_connection())
        loop.run_until_complete(task)

    async def request(self, command, payload, retry=3):
        """Send a signed request to the Mill service.

        Returns the decoded JSON response, or None on error / empty
        response. Re-authenticates and retries on expired tokens.
        """
        # pylint: disable=too-many-return-statements
        if self._token is None:
            _LOGGER.error("No token")
            return None
        # Fix: pass command/payload as lazy logging args, not as the
        # format string (a '%' in command would previously break logging).
        _LOGGER.debug("%s %s", command, payload)
        nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
        url = API_ENDPOINT_2 + command
        timestamp = int(time.time())
        signature = hashlib.sha1(str(REQUEST_TIMEOUT
                                     + str(timestamp)
                                     + nonce
                                     + self._token).encode("utf-8")).hexdigest()
        headers = {
            "Content-Type": "application/x-zc-object",
            "Connection": "Keep-Alive",
            "X-Zc-Major-Domain": "seanywell",
            "X-Zc-Msg-Name": "millService",
            "X-Zc-Sub-Domain": "milltype",
            "X-Zc-Seq-Id": "1",
            "X-Zc-Version": "1",
            "X-Zc-Timestamp": str(timestamp),
            "X-Zc-Timeout": REQUEST_TIMEOUT,
            "X-Zc-Nonce": nonce,
            "X-Zc-User-Id": str(self._user_id),
            "X-Zc-User-Signature": signature,
            # NOTE(review): len(payload) counts dict keys, not the body
            # byte length — presumably what the service expects; verify.
            "X-Zc-Content-Length": str(len(payload)),
        }
        try:
            with async_timeout.timeout(self._timeout):
                resp = await self.websession.post(url,
                                                  data=json.dumps(payload),
                                                  headers=headers)
        except asyncio.TimeoutError:
            if retry < 1:
                _LOGGER.error("Timed out sending command to Mill: %s", command)
                return None
            return await self.request(command, payload, retry - 1)
        except aiohttp.ClientError:
            _LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
            return None
        result = await resp.text()
        _LOGGER.debug(result)
        if not result or result == '{"errorCode":0}':
            return None
        if 'access token expire' in result or 'invalid signature' in result:
            if retry < 1:
                return None
            # Re-authenticate, then retry the original command.
            if not await self.connect():
                return None
            return await self.request(command, payload, retry - 1)
        if '"error":"device offline"' in result:
            if retry < 1:
                _LOGGER.error("Failed to send request, %s", result)
                return None
            _LOGGER.debug("Failed to send request, %s. Retrying...", result)
            await asyncio.sleep(3)
            return await self.request(command, payload, retry - 1)
        if 'errorCode' in result:
            _LOGGER.error("Failed to send request, %s", result)
            return None
        data = json.loads(result)
        return data

    def sync_request(self, command, payload, retry=2):
        """Send a request (blocking wrapper around request)."""
        loop = asyncio.get_event_loop()
        task = loop.create_task(self.request(command, payload, retry))
        return loop.run_until_complete(task)

    async def get_home_list(self):
        """Return the list of homes for the account, or [] on error."""
        resp = await self.request("selectHomeList", "{}")
        if resp is None:
            return []
        return resp.get('homeList', [])

    async def update_rooms(self):
        """Fetch all rooms and their room-assigned heaters."""
        homes = await self.get_home_list()
        for home in homes:
            payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
            data = await self.request("selectRoombyHome", payload)
            # Fix: request() returns None on errors; previously this
            # crashed with AttributeError. Mirrors update_heaters().
            if data is None:
                continue
            rooms = data.get('roomInfo', [])
            for _room in rooms:
                _id = _room.get('roomId')
                room = self.rooms.get(_id, Room())
                room.room_id = _id
                room.comfort_temp = _room.get("comfortTemp")
                room.away_temp = _room.get("awayTemp")
                room.sleep_temp = _room.get("sleepTemp")
                room.name = _room.get("roomName")
                room.current_mode = _room.get("currentMode")
                room.heat_status = _room.get("heatStatus")
                room.home_name = data.get("homeName")
                room.avg_temp = _room.get("avgTemp")
                self.rooms[_id] = room
                payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
                room_device = await self.request("selectDevicebyRoom", payload)
                if room_device is None:
                    continue
                heater_info = room_device.get('deviceInfo', [])
                for _heater in heater_info:
                    _id = _heater.get('deviceId')
                    heater = self.heaters.get(_id, Heater())
                    heater.device_id = _id
                    heater.independent_device = False
                    heater.can_change_temp = _heater.get('canChangeTemp')
                    heater.name = _heater.get('deviceName')
                    heater.room = room
                    self.heaters[_id] = heater

    def sync_update_rooms(self):
        """Fetch rooms (blocking wrapper around update_rooms)."""
        loop = asyncio.get_event_loop()
        task = loop.create_task(self.update_rooms())
        return loop.run_until_complete(task)

    async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
                                            comfort_temp=None, away_temp=None):
        """Set room temps by name."""
        if sleep_temp is None and comfort_temp is None and away_temp is None:
            return
        for room_id, _room in self.rooms.items():
            if _room.name == room_name:
                await self.set_room_temperatures(room_id, sleep_temp,
                                                 comfort_temp, away_temp)
                return
        _LOGGER.error("Could not find a room with name %s", room_name)

    async def set_room_temperatures(self, room_id, sleep_temp=None,
                                    comfort_temp=None, away_temp=None):
        """Set room temps. Unspecified temps keep their current value."""
        if sleep_temp is None and comfort_temp is None and away_temp is None:
            return
        room = self.rooms.get(room_id)
        if room is None:
            _LOGGER.error("No such device")
            return
        room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
        room.away_temp = away_temp if away_temp else room.away_temp
        room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
        payload = {"roomId": room_id,
                   "sleepTemp": room.sleep_temp,
                   "comfortTemp": room.comfort_temp,
                   "awayTemp": room.away_temp,
                   "homeType": 0}
        await self.request("changeRoomModeTempInfo", payload)
        self.rooms[room_id] = room

    async def update_heaters(self):
        """Fetch independent heaters, then refresh every known heater."""
        homes = await self.get_home_list()
        for home in homes:
            payload = {"homeId": home.get("homeId")}
            data = await self.request("getIndependentDevices", payload)
            if data is None:
                continue
            heater_data = data.get('deviceInfo', [])
            if not heater_data:
                continue
            for _heater in heater_data:
                _id = _heater.get('deviceId')
                heater = self.heaters.get(_id, Heater())
                heater.device_id = _id
                await set_heater_values(_heater, heater)
                self.heaters[_id] = heater
        for _id, heater in self.heaters.items():
            if heater.independent_device:
                continue
            payload = {"deviceId": _id}
            _heater = await self.request("selectDevice", payload)
            if _heater is None:
                # Device unreachable: flag it rather than dropping it.
                self.heaters[_id].available = False
                continue
            await set_heater_values(_heater, heater)
            self.heaters[_id] = heater

    def sync_update_heaters(self):
        """Fetch heaters (blocking wrapper around update_heaters)."""
        loop = asyncio.get_event_loop()
        task = loop.create_task(self.update_heaters())
        loop.run_until_complete(task)

    async def throttle_update_heaters(self):
        """Throttle update device."""
        if (self._throttle_time is not None
                and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
            return
        self._throttle_time = dt.datetime.now()
        await self.update_heaters()

    async def throttle_update_all_heaters(self):
        """Throttle update all devices and rooms."""
        if (self._throttle_all_time is not None
                and dt.datetime.now() - self._throttle_all_time
                < MIN_TIME_BETWEEN_UPDATES):
            return
        self._throttle_all_time = dt.datetime.now()
        await self.find_all_heaters()

    async def update_device(self, device_id):
        """Update device."""
        await self.throttle_update_heaters()
        return self.heaters.get(device_id)

    async def update_room(self, room_id):
        """Update room."""
        await self.throttle_update_all_heaters()
        return self.rooms.get(room_id)

    async def heater_control(self, device_id, fan_status=None,
                             power_status=None):
        """Set heater fan/power state. Unspecified values are kept."""
        heater = self.heaters.get(device_id)
        if heater is None:
            _LOGGER.error("No such device")
            return
        if fan_status is None:
            fan_status = heater.fan_status
        if power_status is None:
            power_status = heater.power_status
        # operation 4 means "change fan", 0 means "change power only".
        operation = 0 if fan_status == heater.fan_status else 4
        payload = {"subDomain": heater.sub_domain,
                   "deviceId": device_id,
                   "testStatus": 1,
                   "operation": operation,
                   "status": power_status,
                   "windStatus": fan_status,
                   "holdTemp": heater.set_temp,
                   "tempType": 0,
                   "powerLevel": 0}
        await self.request("deviceControl", payload)

    def sync_heater_control(self, device_id, fan_status=None,
                            power_status=None):
        """Set heater state (blocking wrapper around heater_control)."""
        loop = asyncio.get_event_loop()
        task = loop.create_task(self.heater_control(device_id,
                                                    fan_status,
                                                    power_status))
        loop.run_until_complete(task)

    async def set_heater_temp(self, device_id, set_temp):
        """Set heater target temperature."""
        payload = {"homeType": 0,
                   "timeZoneNum": "+02:00",
                   "deviceId": device_id,
                   "value": int(set_temp),
                   "key": "holidayTemp"}
        await self.request("changeDeviceInfo", payload)

    def sync_set_heater_temp(self, device_id, set_temp):
        """Set heater temp (blocking wrapper around set_heater_temp)."""
        loop = asyncio.get_event_loop()
        task = loop.create_task(self.set_heater_temp(device_id, set_temp))
        loop.run_until_complete(task)

    async def find_all_heaters(self):
        """Find all heaters."""
        await self.update_rooms()
        await self.update_heaters()
class Room:
    """A single Mill room and its temperature setpoints."""
    # pylint: disable=too-few-public-methods
    name = None
    room_id = None
    comfort_temp = None
    away_temp = None
    sleep_temp = None
    is_offline = None
    heat_status = None
    home_name = None
    # Current measured temperature in the room.
    avg_temp = None
    current_mode = None

    def __repr__(self):
        """List every attribute explicitly set on this instance."""
        pairs = []
        for key, value in self.__dict__.items():
            pairs.append("{}={!r}".format(key, value))
        return "{}({})".format(type(self).__name__, ", ".join(pairs))
class Heater:
    """A single Mill heater device."""
    # pylint: disable=too-few-public-methods
    name = None
    device_id = None
    current_temp = None
    set_temp = None
    fan_status = None
    power_status = None
    independent_device = True
    room = None
    open_window = None
    is_heating = None
    tibber_control = None
    sub_domain = 5332
    available = False
    is_holiday = None
    can_change_temp = 1

    @property
    def is_gen1(self):
        """True when this heater reports a generation 1 sub domain."""
        return self.sub_domain in (863,)

    def __repr__(self):
        """List every attribute explicitly set on this instance."""
        pairs = ("{}={!r}".format(key, value)
                 for key, value in self.__dict__.items())
        return "{}({})".format(type(self).__name__, ", ".join(pairs))
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.connect
|
python
|
async def connect(self, retry=2):
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
|
Connect to Mill.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L52-L100
|
[
"async def connect(self, retry=2):\n \"\"\"Connect to Mill.\"\"\"\n # pylint: disable=too-many-return-statements\n url = API_ENDPOINT_1 + 'login'\n headers = {\n \"Content-Type\": \"application/x-zc-object\",\n \"Connection\": \"Keep-Alive\",\n \"X-Zc-Major-Domain\": \"seanywell\",\n \"X-Zc-Msg-Name\": \"millService\",\n \"X-Zc-Sub-Domain\": \"milltype\",\n \"X-Zc-Seq-Id\": \"1\",\n \"X-Zc-Version\": \"1\",\n }\n payload = {\"account\": self._username,\n \"password\": self._password}\n try:\n with async_timeout.timeout(self._timeout):\n resp = await self.websession.post(url,\n data=json.dumps(payload),\n headers=headers)\n except (asyncio.TimeoutError, aiohttp.ClientError):\n if retry < 1:\n _LOGGER.error(\"Error connecting to Mill\", exc_info=True)\n return False\n return await self.connect(retry - 1)\n\n result = await resp.text()\n if '\"errorCode\":3504' in result:\n _LOGGER.error('Wrong password')\n return False\n\n if '\"errorCode\":3501' in result:\n _LOGGER.error('Account does not exist')\n return False\n\n data = json.loads(result)\n token = data.get('token')\n if token is None:\n _LOGGER.error('No token')\n return False\n\n user_id = data.get('userId')\n if user_id is None:\n _LOGGER.error('No user id')\n return False\n\n self._token = token\n self._user_id = user_id\n return True\n"
] |
class Mill:
"""Class to communicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
def sync_connect(self):
"""Connect to Mill."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.connect())
loop.run_until_complete(task)
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
def sync_close_connection(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
async def get_home_list(self):
"""Request data."""
resp = await self.request("selectHomeList", "{}")
if resp is None:
return []
return resp.get('homeList', [])
async def update_rooms(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater
def sync_update_rooms(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps by name."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
async def set_room_temperatures(self, room_id, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
room = self.rooms.get(room_id)
if room is None:
_LOGGER.error("No such device")
return
room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
room.away_temp = away_temp if away_temp else room.away_temp
room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
payload = {"roomId": room_id,
"sleepTemp": room.sleep_temp,
"comfortTemp": room.comfort_temp,
"awayTemp": room.away_temp,
"homeType": 0}
await self.request("changeRoomModeTempInfo", payload)
self.rooms[room_id] = room
async def update_heaters(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
def sync_update_heaters(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
async def throttle_update_heaters(self):
"""Throttle update device."""
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
async def throttle_update_all_heaters(self):
"""Throttle update all devices and rooms."""
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
async def set_heater_temp(self, device_id, set_temp):
"""Set heater temp."""
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
def sync_set_heater_temp(self, device_id, set_temp):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.sync_connect
|
python
|
def sync_connect(self):
loop = asyncio.get_event_loop()
task = loop.create_task(self.connect())
loop.run_until_complete(task)
|
Close the Mill connection.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L102-L106
|
[
"async def connect(self, retry=2):\n \"\"\"Connect to Mill.\"\"\"\n # pylint: disable=too-many-return-statements\n url = API_ENDPOINT_1 + 'login'\n headers = {\n \"Content-Type\": \"application/x-zc-object\",\n \"Connection\": \"Keep-Alive\",\n \"X-Zc-Major-Domain\": \"seanywell\",\n \"X-Zc-Msg-Name\": \"millService\",\n \"X-Zc-Sub-Domain\": \"milltype\",\n \"X-Zc-Seq-Id\": \"1\",\n \"X-Zc-Version\": \"1\",\n }\n payload = {\"account\": self._username,\n \"password\": self._password}\n try:\n with async_timeout.timeout(self._timeout):\n resp = await self.websession.post(url,\n data=json.dumps(payload),\n headers=headers)\n except (asyncio.TimeoutError, aiohttp.ClientError):\n if retry < 1:\n _LOGGER.error(\"Error connecting to Mill\", exc_info=True)\n return False\n return await self.connect(retry - 1)\n\n result = await resp.text()\n if '\"errorCode\":3504' in result:\n _LOGGER.error('Wrong password')\n return False\n\n if '\"errorCode\":3501' in result:\n _LOGGER.error('Account does not exist')\n return False\n\n data = json.loads(result)\n token = data.get('token')\n if token is None:\n _LOGGER.error('No token')\n return False\n\n user_id = data.get('userId')\n if user_id is None:\n _LOGGER.error('No user id')\n return False\n\n self._token = token\n self._user_id = user_id\n return True\n"
] |
class Mill:
"""Class to communicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
def sync_close_connection(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
async def get_home_list(self):
"""Request data."""
resp = await self.request("selectHomeList", "{}")
if resp is None:
return []
return resp.get('homeList', [])
async def update_rooms(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater
def sync_update_rooms(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps by name."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
async def set_room_temperatures(self, room_id, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
room = self.rooms.get(room_id)
if room is None:
_LOGGER.error("No such device")
return
room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
room.away_temp = away_temp if away_temp else room.away_temp
room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
payload = {"roomId": room_id,
"sleepTemp": room.sleep_temp,
"comfortTemp": room.comfort_temp,
"awayTemp": room.away_temp,
"homeType": 0}
await self.request("changeRoomModeTempInfo", payload)
self.rooms[room_id] = room
async def update_heaters(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
def sync_update_heaters(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
async def throttle_update_heaters(self):
"""Throttle update device."""
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
async def throttle_update_all_heaters(self):
"""Throttle update all devices and rooms."""
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
async def set_heater_temp(self, device_id, set_temp):
"""Set heater temp."""
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
def sync_set_heater_temp(self, device_id, set_temp):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.sync_close_connection
|
python
|
def sync_close_connection(self):
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
|
Close the Mill connection.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L112-L116
|
[
"async def close_connection(self):\n \"\"\"Close the Mill connection.\"\"\"\n await self.websession.close()\n"
] |
class Mill:
"""Class to comunicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
def sync_connect(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.connect())
loop.run_until_complete(task)
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
async def get_home_list(self):
"""Request data."""
resp = await self.request("selectHomeList", "{}")
if resp is None:
return []
return resp.get('homeList', [])
async def update_rooms(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater
def sync_update_rooms(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps by name."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
async def set_room_temperatures(self, room_id, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
room = self.rooms.get(room_id)
if room is None:
_LOGGER.error("No such device")
return
room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
room.away_temp = away_temp if away_temp else room.away_temp
room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
payload = {"roomId": room_id,
"sleepTemp": room.sleep_temp,
"comfortTemp": room.comfort_temp,
"awayTemp": room.away_temp,
"homeType": 0}
await self.request("changeRoomModeTempInfo", payload)
self.rooms[room_id] = room
async def update_heaters(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
def sync_update_heaters(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
async def throttle_update_heaters(self):
"""Throttle update device."""
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
async def throttle_update_all_heaters(self):
"""Throttle update all devices and rooms."""
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
async def set_heater_temp(self, device_id, set_temp):
"""Set heater temp."""
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
def sync_set_heater_temp(self, device_id, set_temp):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.request
|
python
|
async def request(self, command, payload, retry=3):
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
|
Request data.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L118-L191
|
[
"async def request(self, command, payload, retry=3):\n \"\"\"Request data.\"\"\"\n # pylint: disable=too-many-return-statements\n\n if self._token is None:\n _LOGGER.error(\"No token\")\n return None\n\n _LOGGER.debug(command, payload)\n\n nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))\n url = API_ENDPOINT_2 + command\n timestamp = int(time.time())\n signature = hashlib.sha1(str(REQUEST_TIMEOUT\n + str(timestamp)\n + nonce\n + self._token).encode(\"utf-8\")).hexdigest()\n\n headers = {\n \"Content-Type\": \"application/x-zc-object\",\n \"Connection\": \"Keep-Alive\",\n \"X-Zc-Major-Domain\": \"seanywell\",\n \"X-Zc-Msg-Name\": \"millService\",\n \"X-Zc-Sub-Domain\": \"milltype\",\n \"X-Zc-Seq-Id\": \"1\",\n \"X-Zc-Version\": \"1\",\n \"X-Zc-Timestamp\": str(timestamp),\n \"X-Zc-Timeout\": REQUEST_TIMEOUT,\n \"X-Zc-Nonce\": nonce,\n \"X-Zc-User-Id\": str(self._user_id),\n \"X-Zc-User-Signature\": signature,\n \"X-Zc-Content-Length\": str(len(payload)),\n }\n try:\n with async_timeout.timeout(self._timeout):\n resp = await self.websession.post(url,\n data=json.dumps(payload),\n headers=headers)\n except asyncio.TimeoutError:\n if retry < 1:\n _LOGGER.error(\"Timed out sending command to Mill: %s\", command)\n return None\n return await self.request(command, payload, retry - 1)\n except aiohttp.ClientError:\n _LOGGER.error(\"Error sending command to Mill: %s\", command, exc_info=True)\n return None\n\n result = await resp.text()\n\n _LOGGER.debug(result)\n\n if not result or result == '{\"errorCode\":0}':\n return None\n\n if 'access token expire' in result or 'invalid signature' in result:\n if retry < 1:\n return None\n if not await self.connect():\n return None\n return await self.request(command, payload, retry - 1)\n\n if '\"error\":\"device offline\"' in result:\n if retry < 1:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n _LOGGER.debug(\"Failed to send request, %s. 
Retrying...\", result)\n await asyncio.sleep(3)\n return await self.request(command, payload, retry - 1)\n\n if 'errorCode' in result:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n data = json.loads(result)\n return data\n"
] |
class Mill:
"""Class to comunicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
def sync_connect(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.connect())
loop.run_until_complete(task)
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
def sync_close_connection(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
async def get_home_list(self):
"""Request data."""
resp = await self.request("selectHomeList", "{}")
if resp is None:
return []
return resp.get('homeList', [])
async def update_rooms(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater
def sync_update_rooms(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps by name."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
async def set_room_temperatures(self, room_id, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
room = self.rooms.get(room_id)
if room is None:
_LOGGER.error("No such device")
return
room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
room.away_temp = away_temp if away_temp else room.away_temp
room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
payload = {"roomId": room_id,
"sleepTemp": room.sleep_temp,
"comfortTemp": room.comfort_temp,
"awayTemp": room.away_temp,
"homeType": 0}
await self.request("changeRoomModeTempInfo", payload)
self.rooms[room_id] = room
async def update_heaters(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
def sync_update_heaters(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
async def throttle_update_heaters(self):
"""Throttle update device."""
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
async def throttle_update_all_heaters(self):
"""Throttle update all devices and rooms."""
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
async def set_heater_temp(self, device_id, set_temp):
"""Set heater temp."""
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
def sync_set_heater_temp(self, device_id, set_temp):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.sync_request
|
python
|
def sync_request(self, command, payload, retry=2):
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
|
Request data.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L193-L197
|
[
"async def request(self, command, payload, retry=3):\n \"\"\"Request data.\"\"\"\n # pylint: disable=too-many-return-statements\n\n if self._token is None:\n _LOGGER.error(\"No token\")\n return None\n\n _LOGGER.debug(command, payload)\n\n nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))\n url = API_ENDPOINT_2 + command\n timestamp = int(time.time())\n signature = hashlib.sha1(str(REQUEST_TIMEOUT\n + str(timestamp)\n + nonce\n + self._token).encode(\"utf-8\")).hexdigest()\n\n headers = {\n \"Content-Type\": \"application/x-zc-object\",\n \"Connection\": \"Keep-Alive\",\n \"X-Zc-Major-Domain\": \"seanywell\",\n \"X-Zc-Msg-Name\": \"millService\",\n \"X-Zc-Sub-Domain\": \"milltype\",\n \"X-Zc-Seq-Id\": \"1\",\n \"X-Zc-Version\": \"1\",\n \"X-Zc-Timestamp\": str(timestamp),\n \"X-Zc-Timeout\": REQUEST_TIMEOUT,\n \"X-Zc-Nonce\": nonce,\n \"X-Zc-User-Id\": str(self._user_id),\n \"X-Zc-User-Signature\": signature,\n \"X-Zc-Content-Length\": str(len(payload)),\n }\n try:\n with async_timeout.timeout(self._timeout):\n resp = await self.websession.post(url,\n data=json.dumps(payload),\n headers=headers)\n except asyncio.TimeoutError:\n if retry < 1:\n _LOGGER.error(\"Timed out sending command to Mill: %s\", command)\n return None\n return await self.request(command, payload, retry - 1)\n except aiohttp.ClientError:\n _LOGGER.error(\"Error sending command to Mill: %s\", command, exc_info=True)\n return None\n\n result = await resp.text()\n\n _LOGGER.debug(result)\n\n if not result or result == '{\"errorCode\":0}':\n return None\n\n if 'access token expire' in result or 'invalid signature' in result:\n if retry < 1:\n return None\n if not await self.connect():\n return None\n return await self.request(command, payload, retry - 1)\n\n if '\"error\":\"device offline\"' in result:\n if retry < 1:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n _LOGGER.debug(\"Failed to send request, %s. 
Retrying...\", result)\n await asyncio.sleep(3)\n return await self.request(command, payload, retry - 1)\n\n if 'errorCode' in result:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n data = json.loads(result)\n return data\n"
] |
class Mill:
"""Class to comunicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
             timeout=DEFAULT_TIMEOUT,
             websession=None):
    """Initialize the Mill connection.

    username/password: Mill cloud account credentials.
    timeout: per-request timeout in seconds.
    websession: optional aiohttp.ClientSession; when omitted a new
        session is created on the current event loop.
    """
    if websession is None:
        # Create the session inside a coroutine so it binds to the loop.
        async def _create_session():
            return aiohttp.ClientSession()
        loop = asyncio.get_event_loop()
        self.websession = loop.run_until_complete(_create_session())
    else:
        self.websession = websession
    self._timeout = timeout
    self._username = username
    self._password = password
    self._user_id = None  # set by connect()
    self._token = None  # set by connect()
    self.rooms = {}  # room id -> Room
    self.heaters = {}  # device id -> Heater
    self._throttle_time = None  # last heater refresh timestamp
    self._throttle_all_time = None  # last full refresh timestamp
async def connect(self, retry=2):
    """Log in to the Mill cloud and store the auth token and user id.

    Retries up to *retry* times on timeouts/client errors. Returns
    True on success, False on bad credentials or persistent failure.
    """
    # pylint: disable=too-many-return-statements
    url = API_ENDPOINT_1 + 'login'
    headers = {
        "Content-Type": "application/x-zc-object",
        "Connection": "Keep-Alive",
        "X-Zc-Major-Domain": "seanywell",
        "X-Zc-Msg-Name": "millService",
        "X-Zc-Sub-Domain": "milltype",
        "X-Zc-Seq-Id": "1",
        "X-Zc-Version": "1",
    }
    payload = {"account": self._username,
               "password": self._password}
    try:
        with async_timeout.timeout(self._timeout):
            resp = await self.websession.post(url,
                                              data=json.dumps(payload),
                                              headers=headers)
    except (asyncio.TimeoutError, aiohttp.ClientError):
        if retry < 1:
            _LOGGER.error("Error connecting to Mill", exc_info=True)
            return False
        return await self.connect(retry - 1)
    result = await resp.text()
    # Error codes handled below: 3504 = wrong password,
    # 3501 = account does not exist.
    if '"errorCode":3504' in result:
        _LOGGER.error('Wrong password')
        return False
    if '"errorCode":3501' in result:
        _LOGGER.error('Account does not exist')
        return False
    data = json.loads(result)
    token = data.get('token')
    if token is None:
        _LOGGER.error('No token')
        return False
    user_id = data.get('userId')
    if user_id is None:
        _LOGGER.error('No user id')
        return False
    self._token = token
    self._user_id = user_id
    return True
def sync_connect(self):
    """Connect to Mill (blocking wrapper around connect())."""
    loop = asyncio.get_event_loop()
    task = loop.create_task(self.connect())
    loop.run_until_complete(task)
async def close_connection(self):
    """Close the underlying aiohttp session."""
    await self.websession.close()
def sync_close_connection(self):
    """Close the Mill connection, blocking until done."""
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(
        event_loop.create_task(self.close_connection()))
async def request(self, command, payload, retry=3):
    """POST *payload* to the API *command* and return the parsed JSON.

    Returns None when not logged in, on errors, on an empty or
    "errorCode":0 response, or when retries are exhausted. On an
    expired token it re-authenticates via connect() and retries; on
    "device offline" it sleeps briefly and retries.
    """
    # pylint: disable=too-many-return-statements
    if self._token is None:
        _LOGGER.error("No token")
        return None
    _LOGGER.debug(command, payload)
    nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
    url = API_ENDPOINT_2 + command
    timestamp = int(time.time())
    # Per-request signature expected by the endpoint:
    # sha1(REQUEST_TIMEOUT + timestamp + nonce + token).
    signature = hashlib.sha1(str(REQUEST_TIMEOUT
                                 + str(timestamp)
                                 + nonce
                                 + self._token).encode("utf-8")).hexdigest()
    headers = {
        "Content-Type": "application/x-zc-object",
        "Connection": "Keep-Alive",
        "X-Zc-Major-Domain": "seanywell",
        "X-Zc-Msg-Name": "millService",
        "X-Zc-Sub-Domain": "milltype",
        "X-Zc-Seq-Id": "1",
        "X-Zc-Version": "1",
        "X-Zc-Timestamp": str(timestamp),
        "X-Zc-Timeout": REQUEST_TIMEOUT,
        "X-Zc-Nonce": nonce,
        "X-Zc-User-Id": str(self._user_id),
        "X-Zc-User-Signature": signature,
        "X-Zc-Content-Length": str(len(payload)),
    }
    try:
        with async_timeout.timeout(self._timeout):
            resp = await self.websession.post(url,
                                              data=json.dumps(payload),
                                              headers=headers)
    except asyncio.TimeoutError:
        if retry < 1:
            _LOGGER.error("Timed out sending command to Mill: %s", command)
            return None
        return await self.request(command, payload, retry - 1)
    except aiohttp.ClientError:
        _LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
        return None
    result = await resp.text()
    _LOGGER.debug(result)
    if not result or result == '{"errorCode":0}':
        return None
    if 'access token expire' in result or 'invalid signature' in result:
        # Token expired/invalid: re-authenticate once, then retry.
        if retry < 1:
            return None
        if not await self.connect():
            return None
        return await self.request(command, payload, retry - 1)
    if '"error":"device offline"' in result:
        if retry < 1:
            _LOGGER.error("Failed to send request, %s", result)
            return None
        # The device may come back shortly; back off before retrying.
        _LOGGER.debug("Failed to send request, %s. Retrying...", result)
        await asyncio.sleep(3)
        return await self.request(command, payload, retry - 1)
    if 'errorCode' in result:
        _LOGGER.error("Failed to send request, %s", result)
        return None
    data = json.loads(result)
    return data
async def get_home_list(self):
    """Return the account's home list; empty list on error."""
    data = await self.request("selectHomeList", "{}")
    return [] if data is None else data.get('homeList', [])
async def update_rooms(self):
    """Refresh self.rooms and self.heaters from the cloud.

    Fetches the room list for every home and, for each room, the
    devices in it. Cached Room/Heater objects are updated in place;
    unknown ids get fresh objects. Heaters found here are marked as
    room-bound (independent_device = False).
    """
    homes = await self.get_home_list()
    for home in homes:
        payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
        data = await self.request("selectRoombyHome", payload)
        if data is None:
            # request() returns None on failure; skip instead of
            # crashing on data.get() (bug fix: AttributeError).
            continue
        rooms = data.get('roomInfo', [])
        for _room in rooms:
            _id = _room.get('roomId')
            room = self.rooms.get(_id, Room())
            room.room_id = _id
            room.comfort_temp = _room.get("comfortTemp")
            room.away_temp = _room.get("awayTemp")
            room.sleep_temp = _room.get("sleepTemp")
            room.name = _room.get("roomName")
            room.current_mode = _room.get("currentMode")
            room.heat_status = _room.get("heatStatus")
            room.home_name = data.get("homeName")
            room.avg_temp = _room.get("avgTemp")
            self.rooms[_id] = room
            payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
            room_device = await self.request("selectDevicebyRoom", payload)
            if room_device is None:
                continue
            heater_info = room_device.get('deviceInfo', [])
            for _heater in heater_info:
                _id = _heater.get('deviceId')
                heater = self.heaters.get(_id, Heater())
                heater.device_id = _id
                heater.independent_device = False
                heater.can_change_temp = _heater.get('canChangeTemp')
                heater.name = _heater.get('deviceName')
                heater.room = room
                self.heaters[_id] = heater
def sync_update_rooms(self):
    """Blocking wrapper around update_rooms()."""
    event_loop = asyncio.get_event_loop()
    return event_loop.run_until_complete(
        event_loop.create_task(self.update_rooms()))
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
                                        comfort_temp=None, away_temp=None):
    """Set temperatures for the room whose name is *room_name*.

    No-op when all three temperatures are None; logs an error when
    no room matches the name.
    """
    if sleep_temp is None and comfort_temp is None and away_temp is None:
        return
    match = next((rid for rid, room in self.rooms.items()
                  if room.name == room_name), None)
    if match is None:
        _LOGGER.error("Could not find a room with name %s", room_name)
        return
    await self.set_room_temperatures(match, sleep_temp,
                                     comfort_temp, away_temp)
async def set_room_temperatures(self, room_id, sleep_temp=None,
                                comfort_temp=None, away_temp=None):
    """Update sleep/comfort/away temperatures for a room.

    Only the temperatures that are passed (not None) are changed;
    the others keep their current value. No-op when all three are
    None or the room id is unknown.
    """
    if sleep_temp is None and comfort_temp is None and away_temp is None:
        return
    room = self.rooms.get(room_id)
    if room is None:
        _LOGGER.error("No such device")
        return
    # Compare against None (not truthiness) so a temperature of 0
    # is not silently ignored (bug fix).
    if sleep_temp is not None:
        room.sleep_temp = sleep_temp
    if away_temp is not None:
        room.away_temp = away_temp
    if comfort_temp is not None:
        room.comfort_temp = comfort_temp
    payload = {"roomId": room_id,
               "sleepTemp": room.sleep_temp,
               "comfortTemp": room.comfort_temp,
               "awayTemp": room.away_temp,
               "homeType": 0}
    await self.request("changeRoomModeTempInfo", payload)
    self.rooms[room_id] = room
async def update_heaters(self):
    """Refresh heater state.

    First pulls 'independent' (non-room) devices for each home, then
    re-reads state for every cached room-bound heater. Heaters whose
    detail request fails are marked unavailable.
    """
    homes = await self.get_home_list()
    for home in homes:
        payload = {"homeId": home.get("homeId")}
        data = await self.request("getIndependentDevices", payload)
        if data is None:
            continue
        heater_data = data.get('deviceInfo', [])
        if not heater_data:
            continue
        for _heater in heater_data:
            _id = _heater.get('deviceId')
            heater = self.heaters.get(_id, Heater())
            heater.device_id = _id
            await set_heater_values(_heater, heater)
            self.heaters[_id] = heater
    # Second pass: refresh heaters that belong to a room.
    for _id, heater in self.heaters.items():
        if heater.independent_device:
            continue
        payload = {"deviceId": _id}
        _heater = await self.request("selectDevice", payload)
        if _heater is None:
            self.heaters[_id].available = False
            continue
        await set_heater_values(_heater, heater)
        self.heaters[_id] = heater
def sync_update_heaters(self):
    """Blocking wrapper around update_heaters()."""
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(
        event_loop.create_task(self.update_heaters()))
async def throttle_update_heaters(self):
    """Run update_heaters(), rate limited by MIN_TIME_BETWEEN_UPDATES."""
    last = self._throttle_time
    if last is not None and dt.datetime.now() - last < MIN_TIME_BETWEEN_UPDATES:
        return
    self._throttle_time = dt.datetime.now()
    await self.update_heaters()
async def throttle_update_all_heaters(self):
    """Run find_all_heaters(), rate limited by MIN_TIME_BETWEEN_UPDATES."""
    last = self._throttle_all_time
    if last is not None and dt.datetime.now() - last < MIN_TIME_BETWEEN_UPDATES:
        return
    self._throttle_all_time = dt.datetime.now()
    await self.find_all_heaters()
async def update_device(self, device_id):
    """Return the Heater for *device_id* after a throttled refresh.

    Returns None when the device id is unknown.
    """
    await self.throttle_update_heaters()
    return self.heaters.get(device_id)
async def update_room(self, room_id):
    """Return the Room for *room_id* after a throttled full refresh.

    Returns None when the room id is unknown.
    """
    await self.throttle_update_all_heaters()
    return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
                         power_status=None):
    """Set the fan and/or power state of a heater.

    Parameters left as None keep the heater's current state; the
    current set_temp is re-sent unchanged.
    """
    heater = self.heaters.get(device_id)
    if heater is None:
        _LOGGER.error("No such device")
        return
    if fan_status is None:
        fan_status = heater.fan_status
    if power_status is None:
        power_status = heater.power_status
    # operation 4 appears to mean "change fan state", 0 = no fan
    # change -- inferred from this usage; TODO confirm against API.
    operation = 0 if fan_status == heater.fan_status else 4
    payload = {"subDomain": heater.sub_domain,
               "deviceId": device_id,
               "testStatus": 1,
               "operation": operation,
               "status": power_status,
               "windStatus": fan_status,
               "holdTemp": heater.set_temp,
               "tempType": 0,
               "powerLevel": 0}
    await self.request("deviceControl", payload)
def sync_heater_control(self, device_id, fan_status=None,
                        power_status=None):
    """Blocking wrapper around heater_control()."""
    event_loop = asyncio.get_event_loop()
    coro = self.heater_control(device_id, fan_status, power_status)
    event_loop.run_until_complete(event_loop.create_task(coro))
async def set_heater_temp(self, device_id, set_temp):
    """Set the target temperature of a single heater.

    NOTE(review): timezone is hard-coded to "+02:00" here while the
    room requests use "+01:00" -- confirm whether the API cares.
    """
    payload = {"homeType": 0,
               "timeZoneNum": "+02:00",
               "deviceId": device_id,
               "value": int(set_temp),
               "key": "holidayTemp"}
    await self.request("changeDeviceInfo", payload)
def sync_set_heater_temp(self, device_id, set_temp):
    """Blocking wrapper around set_heater_temp()."""
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(
        event_loop.create_task(self.set_heater_temp(device_id, set_temp)))
async def find_all_heaters(self):
    """Full refresh: rooms (and their heaters), then heater state."""
    await self.update_rooms()
    await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.update_rooms
|
python
|
async def update_rooms(self):
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater
|
Request data.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L206-L241
|
[
"async def request(self, command, payload, retry=3):\n \"\"\"Request data.\"\"\"\n # pylint: disable=too-many-return-statements\n\n if self._token is None:\n _LOGGER.error(\"No token\")\n return None\n\n _LOGGER.debug(command, payload)\n\n nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))\n url = API_ENDPOINT_2 + command\n timestamp = int(time.time())\n signature = hashlib.sha1(str(REQUEST_TIMEOUT\n + str(timestamp)\n + nonce\n + self._token).encode(\"utf-8\")).hexdigest()\n\n headers = {\n \"Content-Type\": \"application/x-zc-object\",\n \"Connection\": \"Keep-Alive\",\n \"X-Zc-Major-Domain\": \"seanywell\",\n \"X-Zc-Msg-Name\": \"millService\",\n \"X-Zc-Sub-Domain\": \"milltype\",\n \"X-Zc-Seq-Id\": \"1\",\n \"X-Zc-Version\": \"1\",\n \"X-Zc-Timestamp\": str(timestamp),\n \"X-Zc-Timeout\": REQUEST_TIMEOUT,\n \"X-Zc-Nonce\": nonce,\n \"X-Zc-User-Id\": str(self._user_id),\n \"X-Zc-User-Signature\": signature,\n \"X-Zc-Content-Length\": str(len(payload)),\n }\n try:\n with async_timeout.timeout(self._timeout):\n resp = await self.websession.post(url,\n data=json.dumps(payload),\n headers=headers)\n except asyncio.TimeoutError:\n if retry < 1:\n _LOGGER.error(\"Timed out sending command to Mill: %s\", command)\n return None\n return await self.request(command, payload, retry - 1)\n except aiohttp.ClientError:\n _LOGGER.error(\"Error sending command to Mill: %s\", command, exc_info=True)\n return None\n\n result = await resp.text()\n\n _LOGGER.debug(result)\n\n if not result or result == '{\"errorCode\":0}':\n return None\n\n if 'access token expire' in result or 'invalid signature' in result:\n if retry < 1:\n return None\n if not await self.connect():\n return None\n return await self.request(command, payload, retry - 1)\n\n if '\"error\":\"device offline\"' in result:\n if retry < 1:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n _LOGGER.debug(\"Failed to send request, %s. 
Retrying...\", result)\n await asyncio.sleep(3)\n return await self.request(command, payload, retry - 1)\n\n if 'errorCode' in result:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n data = json.loads(result)\n return data\n",
"async def get_home_list(self):\n \"\"\"Request data.\"\"\"\n resp = await self.request(\"selectHomeList\", \"{}\")\n if resp is None:\n return []\n return resp.get('homeList', [])\n"
] |
class Mill:
"""Class to comunicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
def sync_connect(self):
    """Connect to Mill (blocking wrapper around connect())."""
    loop = asyncio.get_event_loop()
    task = loop.create_task(self.connect())
    loop.run_until_complete(task)
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
def sync_close_connection(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def sync_request(self, command, payload, retry=2):
    """Blocking wrapper around request(); returns the parsed response."""
    event_loop = asyncio.get_event_loop()
    return event_loop.run_until_complete(
        event_loop.create_task(self.request(command, payload, retry)))
async def get_home_list(self):
    """Return the account's home list; empty list on error."""
    data = await self.request("selectHomeList", "{}")
    return [] if data is None else data.get('homeList', [])
def sync_update_rooms(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps by name."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
async def set_room_temperatures(self, room_id, sleep_temp=None,
                                comfort_temp=None, away_temp=None):
    """Update sleep/comfort/away temperatures for a room.

    Only the temperatures that are passed (not None) are changed;
    the others keep their current value. No-op when all three are
    None or the room id is unknown.
    """
    if sleep_temp is None and comfort_temp is None and away_temp is None:
        return
    room = self.rooms.get(room_id)
    if room is None:
        _LOGGER.error("No such device")
        return
    # Compare against None (not truthiness) so a temperature of 0
    # is not silently ignored (bug fix).
    if sleep_temp is not None:
        room.sleep_temp = sleep_temp
    if away_temp is not None:
        room.away_temp = away_temp
    if comfort_temp is not None:
        room.comfort_temp = comfort_temp
    payload = {"roomId": room_id,
               "sleepTemp": room.sleep_temp,
               "comfortTemp": room.comfort_temp,
               "awayTemp": room.away_temp,
               "homeType": 0}
    await self.request("changeRoomModeTempInfo", payload)
    self.rooms[room_id] = room
async def update_heaters(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
def sync_update_heaters(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
async def throttle_update_heaters(self):
"""Throttle update device."""
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
async def throttle_update_all_heaters(self):
"""Throttle update all devices and rooms."""
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
async def set_heater_temp(self, device_id, set_temp):
"""Set heater temp."""
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
def sync_set_heater_temp(self, device_id, set_temp):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.sync_update_rooms
|
python
|
def sync_update_rooms(self):
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
|
Request data.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L243-L247
|
[
"async def update_rooms(self):\n \"\"\"Request data.\"\"\"\n homes = await self.get_home_list()\n for home in homes:\n payload = {\"homeId\": home.get(\"homeId\"), \"timeZoneNum\": \"+01:00\"}\n data = await self.request(\"selectRoombyHome\", payload)\n rooms = data.get('roomInfo', [])\n for _room in rooms:\n _id = _room.get('roomId')\n room = self.rooms.get(_id, Room())\n room.room_id = _id\n room.comfort_temp = _room.get(\"comfortTemp\")\n room.away_temp = _room.get(\"awayTemp\")\n room.sleep_temp = _room.get(\"sleepTemp\")\n room.name = _room.get(\"roomName\")\n room.current_mode = _room.get(\"currentMode\")\n room.heat_status = _room.get(\"heatStatus\")\n room.home_name = data.get(\"homeName\")\n room.avg_temp = _room.get(\"avgTemp\")\n\n self.rooms[_id] = room\n payload = {\"roomId\": _room.get(\"roomId\"), \"timeZoneNum\": \"+01:00\"}\n room_device = await self.request(\"selectDevicebyRoom\", payload)\n\n if room_device is None:\n continue\n heater_info = room_device.get('deviceInfo', [])\n for _heater in heater_info:\n _id = _heater.get('deviceId')\n heater = self.heaters.get(_id, Heater())\n heater.device_id = _id\n heater.independent_device = False\n heater.can_change_temp = _heater.get('canChangeTemp')\n heater.name = _heater.get('deviceName')\n heater.room = room\n self.heaters[_id] = heater\n"
] |
class Mill:
"""Class to comunicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
def sync_connect(self):
    """Connect to Mill (blocking wrapper around connect())."""
    loop = asyncio.get_event_loop()
    task = loop.create_task(self.connect())
    loop.run_until_complete(task)
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
def sync_close_connection(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
async def get_home_list(self):
"""Request data."""
resp = await self.request("selectHomeList", "{}")
if resp is None:
return []
return resp.get('homeList', [])
async def update_rooms(self):
    """Refresh self.rooms and self.heaters from the cloud.

    Fetches the room list for every home and, for each room, the
    devices in it. Cached Room/Heater objects are updated in place;
    unknown ids get fresh objects. Heaters found here are marked as
    room-bound (independent_device = False).
    """
    homes = await self.get_home_list()
    for home in homes:
        payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
        data = await self.request("selectRoombyHome", payload)
        if data is None:
            # request() returns None on failure; skip instead of
            # crashing on data.get() (bug fix: AttributeError).
            continue
        rooms = data.get('roomInfo', [])
        for _room in rooms:
            _id = _room.get('roomId')
            room = self.rooms.get(_id, Room())
            room.room_id = _id
            room.comfort_temp = _room.get("comfortTemp")
            room.away_temp = _room.get("awayTemp")
            room.sleep_temp = _room.get("sleepTemp")
            room.name = _room.get("roomName")
            room.current_mode = _room.get("currentMode")
            room.heat_status = _room.get("heatStatus")
            room.home_name = data.get("homeName")
            room.avg_temp = _room.get("avgTemp")
            self.rooms[_id] = room
            payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
            room_device = await self.request("selectDevicebyRoom", payload)
            if room_device is None:
                continue
            heater_info = room_device.get('deviceInfo', [])
            for _heater in heater_info:
                _id = _heater.get('deviceId')
                heater = self.heaters.get(_id, Heater())
                heater.device_id = _id
                heater.independent_device = False
                heater.can_change_temp = _heater.get('canChangeTemp')
                heater.name = _heater.get('deviceName')
                heater.room = room
                self.heaters[_id] = heater
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps by name."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
async def set_room_temperatures(self, room_id, sleep_temp=None,
                                comfort_temp=None, away_temp=None):
    """Update sleep/comfort/away temperatures for a room.

    Only the temperatures that are passed (not None) are changed;
    the others keep their current value. No-op when all three are
    None or the room id is unknown.
    """
    if sleep_temp is None and comfort_temp is None and away_temp is None:
        return
    room = self.rooms.get(room_id)
    if room is None:
        _LOGGER.error("No such device")
        return
    # Compare against None (not truthiness) so a temperature of 0
    # is not silently ignored (bug fix).
    if sleep_temp is not None:
        room.sleep_temp = sleep_temp
    if away_temp is not None:
        room.away_temp = away_temp
    if comfort_temp is not None:
        room.comfort_temp = comfort_temp
    payload = {"roomId": room_id,
               "sleepTemp": room.sleep_temp,
               "comfortTemp": room.comfort_temp,
               "awayTemp": room.away_temp,
               "homeType": 0}
    await self.request("changeRoomModeTempInfo", payload)
    self.rooms[room_id] = room
async def update_heaters(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
def sync_update_heaters(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
async def throttle_update_heaters(self):
"""Throttle update device."""
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
async def throttle_update_all_heaters(self):
"""Throttle update all devices and rooms."""
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
async def set_heater_temp(self, device_id, set_temp):
"""Set heater temp."""
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
def sync_set_heater_temp(self, device_id, set_temp):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.set_room_temperatures_by_name
|
python
|
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
|
Set room temps by name.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L249-L259
|
[
"async def set_room_temperatures(self, room_id, sleep_temp=None,\n comfort_temp=None, away_temp=None):\n \"\"\"Set room temps.\"\"\"\n if sleep_temp is None and comfort_temp is None and away_temp is None:\n return\n room = self.rooms.get(room_id)\n if room is None:\n _LOGGER.error(\"No such device\")\n return\n room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp\n room.away_temp = away_temp if away_temp else room.away_temp\n room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp\n payload = {\"roomId\": room_id,\n \"sleepTemp\": room.sleep_temp,\n \"comfortTemp\": room.comfort_temp,\n \"awayTemp\": room.away_temp,\n \"homeType\": 0}\n await self.request(\"changeRoomModeTempInfo\", payload)\n self.rooms[room_id] = room\n"
] |
class Mill:
"""Class to comunicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
def sync_connect(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.connect())
loop.run_until_complete(task)
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
def sync_close_connection(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
async def get_home_list(self):
"""Request data."""
resp = await self.request("selectHomeList", "{}")
if resp is None:
return []
return resp.get('homeList', [])
async def update_rooms(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater
def sync_update_rooms(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
async def set_room_temperatures(self, room_id, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
room = self.rooms.get(room_id)
if room is None:
_LOGGER.error("No such device")
return
room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
room.away_temp = away_temp if away_temp else room.away_temp
room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
payload = {"roomId": room_id,
"sleepTemp": room.sleep_temp,
"comfortTemp": room.comfort_temp,
"awayTemp": room.away_temp,
"homeType": 0}
await self.request("changeRoomModeTempInfo", payload)
self.rooms[room_id] = room
async def update_heaters(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
def sync_update_heaters(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
async def throttle_update_heaters(self):
"""Throttle update device."""
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
async def throttle_update_all_heaters(self):
"""Throttle update all devices and rooms."""
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
async def set_heater_temp(self, device_id, set_temp):
"""Set heater temp."""
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
def sync_set_heater_temp(self, device_id, set_temp):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.set_room_temperatures
|
python
|
async def set_room_temperatures(self, room_id, sleep_temp=None,
comfort_temp=None, away_temp=None):
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
room = self.rooms.get(room_id)
if room is None:
_LOGGER.error("No such device")
return
room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
room.away_temp = away_temp if away_temp else room.away_temp
room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
payload = {"roomId": room_id,
"sleepTemp": room.sleep_temp,
"comfortTemp": room.comfort_temp,
"awayTemp": room.away_temp,
"homeType": 0}
await self.request("changeRoomModeTempInfo", payload)
self.rooms[room_id] = room
|
Set room temps.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L261-L279
|
[
"async def request(self, command, payload, retry=3):\n \"\"\"Request data.\"\"\"\n # pylint: disable=too-many-return-statements\n\n if self._token is None:\n _LOGGER.error(\"No token\")\n return None\n\n _LOGGER.debug(command, payload)\n\n nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))\n url = API_ENDPOINT_2 + command\n timestamp = int(time.time())\n signature = hashlib.sha1(str(REQUEST_TIMEOUT\n + str(timestamp)\n + nonce\n + self._token).encode(\"utf-8\")).hexdigest()\n\n headers = {\n \"Content-Type\": \"application/x-zc-object\",\n \"Connection\": \"Keep-Alive\",\n \"X-Zc-Major-Domain\": \"seanywell\",\n \"X-Zc-Msg-Name\": \"millService\",\n \"X-Zc-Sub-Domain\": \"milltype\",\n \"X-Zc-Seq-Id\": \"1\",\n \"X-Zc-Version\": \"1\",\n \"X-Zc-Timestamp\": str(timestamp),\n \"X-Zc-Timeout\": REQUEST_TIMEOUT,\n \"X-Zc-Nonce\": nonce,\n \"X-Zc-User-Id\": str(self._user_id),\n \"X-Zc-User-Signature\": signature,\n \"X-Zc-Content-Length\": str(len(payload)),\n }\n try:\n with async_timeout.timeout(self._timeout):\n resp = await self.websession.post(url,\n data=json.dumps(payload),\n headers=headers)\n except asyncio.TimeoutError:\n if retry < 1:\n _LOGGER.error(\"Timed out sending command to Mill: %s\", command)\n return None\n return await self.request(command, payload, retry - 1)\n except aiohttp.ClientError:\n _LOGGER.error(\"Error sending command to Mill: %s\", command, exc_info=True)\n return None\n\n result = await resp.text()\n\n _LOGGER.debug(result)\n\n if not result or result == '{\"errorCode\":0}':\n return None\n\n if 'access token expire' in result or 'invalid signature' in result:\n if retry < 1:\n return None\n if not await self.connect():\n return None\n return await self.request(command, payload, retry - 1)\n\n if '\"error\":\"device offline\"' in result:\n if retry < 1:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n _LOGGER.debug(\"Failed to send request, %s. 
Retrying...\", result)\n await asyncio.sleep(3)\n return await self.request(command, payload, retry - 1)\n\n if 'errorCode' in result:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n data = json.loads(result)\n return data\n"
] |
class Mill:
"""Class to comunicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
def sync_connect(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.connect())
loop.run_until_complete(task)
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
def sync_close_connection(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
async def get_home_list(self):
"""Request data."""
resp = await self.request("selectHomeList", "{}")
if resp is None:
return []
return resp.get('homeList', [])
async def update_rooms(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater
def sync_update_rooms(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps by name."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
async def update_heaters(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
def sync_update_heaters(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
async def throttle_update_heaters(self):
"""Throttle update device."""
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
async def throttle_update_all_heaters(self):
"""Throttle update all devices and rooms."""
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
async def set_heater_temp(self, device_id, set_temp):
"""Set heater temp."""
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
def sync_set_heater_temp(self, device_id, set_temp):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.update_heaters
|
python
|
async def update_heaters(self):
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
|
Request data.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L281-L308
|
[
"async def request(self, command, payload, retry=3):\n \"\"\"Request data.\"\"\"\n # pylint: disable=too-many-return-statements\n\n if self._token is None:\n _LOGGER.error(\"No token\")\n return None\n\n _LOGGER.debug(command, payload)\n\n nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))\n url = API_ENDPOINT_2 + command\n timestamp = int(time.time())\n signature = hashlib.sha1(str(REQUEST_TIMEOUT\n + str(timestamp)\n + nonce\n + self._token).encode(\"utf-8\")).hexdigest()\n\n headers = {\n \"Content-Type\": \"application/x-zc-object\",\n \"Connection\": \"Keep-Alive\",\n \"X-Zc-Major-Domain\": \"seanywell\",\n \"X-Zc-Msg-Name\": \"millService\",\n \"X-Zc-Sub-Domain\": \"milltype\",\n \"X-Zc-Seq-Id\": \"1\",\n \"X-Zc-Version\": \"1\",\n \"X-Zc-Timestamp\": str(timestamp),\n \"X-Zc-Timeout\": REQUEST_TIMEOUT,\n \"X-Zc-Nonce\": nonce,\n \"X-Zc-User-Id\": str(self._user_id),\n \"X-Zc-User-Signature\": signature,\n \"X-Zc-Content-Length\": str(len(payload)),\n }\n try:\n with async_timeout.timeout(self._timeout):\n resp = await self.websession.post(url,\n data=json.dumps(payload),\n headers=headers)\n except asyncio.TimeoutError:\n if retry < 1:\n _LOGGER.error(\"Timed out sending command to Mill: %s\", command)\n return None\n return await self.request(command, payload, retry - 1)\n except aiohttp.ClientError:\n _LOGGER.error(\"Error sending command to Mill: %s\", command, exc_info=True)\n return None\n\n result = await resp.text()\n\n _LOGGER.debug(result)\n\n if not result or result == '{\"errorCode\":0}':\n return None\n\n if 'access token expire' in result or 'invalid signature' in result:\n if retry < 1:\n return None\n if not await self.connect():\n return None\n return await self.request(command, payload, retry - 1)\n\n if '\"error\":\"device offline\"' in result:\n if retry < 1:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n _LOGGER.debug(\"Failed to send request, %s. 
Retrying...\", result)\n await asyncio.sleep(3)\n return await self.request(command, payload, retry - 1)\n\n if 'errorCode' in result:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n data = json.loads(result)\n return data\n",
"async def get_home_list(self):\n \"\"\"Request data.\"\"\"\n resp = await self.request(\"selectHomeList\", \"{}\")\n if resp is None:\n return []\n return resp.get('homeList', [])\n"
] |
class Mill:
"""Class to comunicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
def sync_connect(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.connect())
loop.run_until_complete(task)
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
def sync_close_connection(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
async def get_home_list(self):
"""Request data."""
resp = await self.request("selectHomeList", "{}")
if resp is None:
return []
return resp.get('homeList', [])
async def update_rooms(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater
def sync_update_rooms(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps by name."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
async def set_room_temperatures(self, room_id, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
room = self.rooms.get(room_id)
if room is None:
_LOGGER.error("No such device")
return
room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
room.away_temp = away_temp if away_temp else room.away_temp
room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
payload = {"roomId": room_id,
"sleepTemp": room.sleep_temp,
"comfortTemp": room.comfort_temp,
"awayTemp": room.away_temp,
"homeType": 0}
await self.request("changeRoomModeTempInfo", payload)
self.rooms[room_id] = room
def sync_update_heaters(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
async def throttle_update_heaters(self):
"""Throttle update device."""
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
async def throttle_update_all_heaters(self):
"""Throttle update all devices and rooms."""
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
async def set_heater_temp(self, device_id, set_temp):
"""Set heater temp."""
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
def sync_set_heater_temp(self, device_id, set_temp):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.sync_update_heaters
|
python
|
def sync_update_heaters(self):
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
|
Request data.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L310-L314
|
[
"async def update_heaters(self):\n \"\"\"Request data.\"\"\"\n homes = await self.get_home_list()\n for home in homes:\n payload = {\"homeId\": home.get(\"homeId\")}\n data = await self.request(\"getIndependentDevices\", payload)\n if data is None:\n continue\n heater_data = data.get('deviceInfo', [])\n if not heater_data:\n continue\n for _heater in heater_data:\n _id = _heater.get('deviceId')\n heater = self.heaters.get(_id, Heater())\n heater.device_id = _id\n await set_heater_values(_heater, heater)\n self.heaters[_id] = heater\n\n for _id, heater in self.heaters.items():\n if heater.independent_device:\n continue\n payload = {\"deviceId\": _id}\n _heater = await self.request(\"selectDevice\", payload)\n if _heater is None:\n self.heaters[_id].available = False\n continue\n await set_heater_values(_heater, heater)\n self.heaters[_id] = heater\n"
] |
class Mill:
"""Class to comunicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
def sync_connect(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.connect())
loop.run_until_complete(task)
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
def sync_close_connection(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
async def get_home_list(self):
"""Request data."""
resp = await self.request("selectHomeList", "{}")
if resp is None:
return []
return resp.get('homeList', [])
async def update_rooms(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater
def sync_update_rooms(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps by name."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
async def set_room_temperatures(self, room_id, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
room = self.rooms.get(room_id)
if room is None:
_LOGGER.error("No such device")
return
room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
room.away_temp = away_temp if away_temp else room.away_temp
room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
payload = {"roomId": room_id,
"sleepTemp": room.sleep_temp,
"comfortTemp": room.comfort_temp,
"awayTemp": room.away_temp,
"homeType": 0}
await self.request("changeRoomModeTempInfo", payload)
self.rooms[room_id] = room
async def update_heaters(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
async def throttle_update_heaters(self):
"""Throttle update device."""
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
async def throttle_update_all_heaters(self):
"""Throttle update all devices and rooms."""
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
async def set_heater_temp(self, device_id, set_temp):
"""Set heater temp."""
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
def sync_set_heater_temp(self, device_id, set_temp):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.throttle_update_heaters
|
python
|
async def throttle_update_heaters(self):
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
|
Throttle update device.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L316-L322
|
[
"async def update_heaters(self):\n \"\"\"Request data.\"\"\"\n homes = await self.get_home_list()\n for home in homes:\n payload = {\"homeId\": home.get(\"homeId\")}\n data = await self.request(\"getIndependentDevices\", payload)\n if data is None:\n continue\n heater_data = data.get('deviceInfo', [])\n if not heater_data:\n continue\n for _heater in heater_data:\n _id = _heater.get('deviceId')\n heater = self.heaters.get(_id, Heater())\n heater.device_id = _id\n await set_heater_values(_heater, heater)\n self.heaters[_id] = heater\n\n for _id, heater in self.heaters.items():\n if heater.independent_device:\n continue\n payload = {\"deviceId\": _id}\n _heater = await self.request(\"selectDevice\", payload)\n if _heater is None:\n self.heaters[_id].available = False\n continue\n await set_heater_values(_heater, heater)\n self.heaters[_id] = heater\n"
] |
class Mill:
"""Class to comunicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
def sync_connect(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.connect())
loop.run_until_complete(task)
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
def sync_close_connection(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
async def get_home_list(self):
"""Request data."""
resp = await self.request("selectHomeList", "{}")
if resp is None:
return []
return resp.get('homeList', [])
async def update_rooms(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater
def sync_update_rooms(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps by name."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
async def set_room_temperatures(self, room_id, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
room = self.rooms.get(room_id)
if room is None:
_LOGGER.error("No such device")
return
room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
room.away_temp = away_temp if away_temp else room.away_temp
room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
payload = {"roomId": room_id,
"sleepTemp": room.sleep_temp,
"comfortTemp": room.comfort_temp,
"awayTemp": room.away_temp,
"homeType": 0}
await self.request("changeRoomModeTempInfo", payload)
self.rooms[room_id] = room
async def update_heaters(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
def sync_update_heaters(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
async def throttle_update_all_heaters(self):
"""Throttle update all devices and rooms."""
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
async def set_heater_temp(self, device_id, set_temp):
"""Set heater temp."""
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
def sync_set_heater_temp(self, device_id, set_temp):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.throttle_update_all_heaters
|
python
|
async def throttle_update_all_heaters(self):
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
|
Throttle update all devices and rooms.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L324-L331
|
[
"async def find_all_heaters(self):\n \"\"\"Find all heaters.\"\"\"\n await self.update_rooms()\n await self.update_heaters()\n"
] |
class Mill:
"""Class to comunicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
def sync_connect(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.connect())
loop.run_until_complete(task)
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
def sync_close_connection(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
async def get_home_list(self):
"""Request data."""
resp = await self.request("selectHomeList", "{}")
if resp is None:
return []
return resp.get('homeList', [])
async def update_rooms(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater
def sync_update_rooms(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps by name."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
async def set_room_temperatures(self, room_id, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
room = self.rooms.get(room_id)
if room is None:
_LOGGER.error("No such device")
return
room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
room.away_temp = away_temp if away_temp else room.away_temp
room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
payload = {"roomId": room_id,
"sleepTemp": room.sleep_temp,
"comfortTemp": room.comfort_temp,
"awayTemp": room.away_temp,
"homeType": 0}
await self.request("changeRoomModeTempInfo", payload)
self.rooms[room_id] = room
async def update_heaters(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
def sync_update_heaters(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
async def throttle_update_heaters(self):
"""Throttle update device."""
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
async def set_heater_temp(self, device_id, set_temp):
"""Set heater temp."""
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
def sync_set_heater_temp(self, device_id, set_temp):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.heater_control
|
python
|
async def heater_control(self, device_id, fan_status=None,
power_status=None):
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
|
Set heater temps.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L343-L364
|
[
"async def request(self, command, payload, retry=3):\n \"\"\"Request data.\"\"\"\n # pylint: disable=too-many-return-statements\n\n if self._token is None:\n _LOGGER.error(\"No token\")\n return None\n\n _LOGGER.debug(command, payload)\n\n nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))\n url = API_ENDPOINT_2 + command\n timestamp = int(time.time())\n signature = hashlib.sha1(str(REQUEST_TIMEOUT\n + str(timestamp)\n + nonce\n + self._token).encode(\"utf-8\")).hexdigest()\n\n headers = {\n \"Content-Type\": \"application/x-zc-object\",\n \"Connection\": \"Keep-Alive\",\n \"X-Zc-Major-Domain\": \"seanywell\",\n \"X-Zc-Msg-Name\": \"millService\",\n \"X-Zc-Sub-Domain\": \"milltype\",\n \"X-Zc-Seq-Id\": \"1\",\n \"X-Zc-Version\": \"1\",\n \"X-Zc-Timestamp\": str(timestamp),\n \"X-Zc-Timeout\": REQUEST_TIMEOUT,\n \"X-Zc-Nonce\": nonce,\n \"X-Zc-User-Id\": str(self._user_id),\n \"X-Zc-User-Signature\": signature,\n \"X-Zc-Content-Length\": str(len(payload)),\n }\n try:\n with async_timeout.timeout(self._timeout):\n resp = await self.websession.post(url,\n data=json.dumps(payload),\n headers=headers)\n except asyncio.TimeoutError:\n if retry < 1:\n _LOGGER.error(\"Timed out sending command to Mill: %s\", command)\n return None\n return await self.request(command, payload, retry - 1)\n except aiohttp.ClientError:\n _LOGGER.error(\"Error sending command to Mill: %s\", command, exc_info=True)\n return None\n\n result = await resp.text()\n\n _LOGGER.debug(result)\n\n if not result or result == '{\"errorCode\":0}':\n return None\n\n if 'access token expire' in result or 'invalid signature' in result:\n if retry < 1:\n return None\n if not await self.connect():\n return None\n return await self.request(command, payload, retry - 1)\n\n if '\"error\":\"device offline\"' in result:\n if retry < 1:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n _LOGGER.debug(\"Failed to send request, %s. 
Retrying...\", result)\n await asyncio.sleep(3)\n return await self.request(command, payload, retry - 1)\n\n if 'errorCode' in result:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n data = json.loads(result)\n return data\n"
] |
class Mill:
"""Class to comunicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
def sync_connect(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.connect())
loop.run_until_complete(task)
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
def sync_close_connection(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
async def get_home_list(self):
"""Request data."""
resp = await self.request("selectHomeList", "{}")
if resp is None:
return []
return resp.get('homeList', [])
async def update_rooms(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater
def sync_update_rooms(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps by name."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
async def set_room_temperatures(self, room_id, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
room = self.rooms.get(room_id)
if room is None:
_LOGGER.error("No such device")
return
room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
room.away_temp = away_temp if away_temp else room.away_temp
room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
payload = {"roomId": room_id,
"sleepTemp": room.sleep_temp,
"comfortTemp": room.comfort_temp,
"awayTemp": room.away_temp,
"homeType": 0}
await self.request("changeRoomModeTempInfo", payload)
self.rooms[room_id] = room
async def update_heaters(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
def sync_update_heaters(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
async def throttle_update_heaters(self):
"""Throttle update device."""
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
async def throttle_update_all_heaters(self):
"""Throttle update all devices and rooms."""
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
async def set_heater_temp(self, device_id, set_temp):
"""Set heater temp."""
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
def sync_set_heater_temp(self, device_id, set_temp):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.sync_heater_control
|
python
|
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
|
Set heater temps.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L366-L373
|
[
"async def heater_control(self, device_id, fan_status=None,\n power_status=None):\n \"\"\"Set heater temps.\"\"\"\n heater = self.heaters.get(device_id)\n if heater is None:\n _LOGGER.error(\"No such device\")\n return\n if fan_status is None:\n fan_status = heater.fan_status\n if power_status is None:\n power_status = heater.power_status\n operation = 0 if fan_status == heater.fan_status else 4\n payload = {\"subDomain\": heater.sub_domain,\n \"deviceId\": device_id,\n \"testStatus\": 1,\n \"operation\": operation,\n \"status\": power_status,\n \"windStatus\": fan_status,\n \"holdTemp\": heater.set_temp,\n \"tempType\": 0,\n \"powerLevel\": 0}\n await self.request(\"deviceControl\", payload)\n"
] |
class Mill:
"""Class to comunicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
def sync_connect(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.connect())
loop.run_until_complete(task)
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
def sync_close_connection(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
async def get_home_list(self):
"""Request data."""
resp = await self.request("selectHomeList", "{}")
if resp is None:
return []
return resp.get('homeList', [])
async def update_rooms(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater
def sync_update_rooms(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps by name."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
async def set_room_temperatures(self, room_id, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
room = self.rooms.get(room_id)
if room is None:
_LOGGER.error("No such device")
return
room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
room.away_temp = away_temp if away_temp else room.away_temp
room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
payload = {"roomId": room_id,
"sleepTemp": room.sleep_temp,
"comfortTemp": room.comfort_temp,
"awayTemp": room.away_temp,
"homeType": 0}
await self.request("changeRoomModeTempInfo", payload)
self.rooms[room_id] = room
async def update_heaters(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
def sync_update_heaters(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
async def throttle_update_heaters(self):
"""Throttle update device."""
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
async def throttle_update_all_heaters(self):
"""Throttle update all devices and rooms."""
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
async def set_heater_temp(self, device_id, set_temp):
"""Set heater temp."""
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
def sync_set_heater_temp(self, device_id, set_temp):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.set_heater_temp
|
python
|
async def set_heater_temp(self, device_id, set_temp):
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
|
Set heater temp.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L375-L382
|
[
"async def request(self, command, payload, retry=3):\n \"\"\"Request data.\"\"\"\n # pylint: disable=too-many-return-statements\n\n if self._token is None:\n _LOGGER.error(\"No token\")\n return None\n\n _LOGGER.debug(command, payload)\n\n nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))\n url = API_ENDPOINT_2 + command\n timestamp = int(time.time())\n signature = hashlib.sha1(str(REQUEST_TIMEOUT\n + str(timestamp)\n + nonce\n + self._token).encode(\"utf-8\")).hexdigest()\n\n headers = {\n \"Content-Type\": \"application/x-zc-object\",\n \"Connection\": \"Keep-Alive\",\n \"X-Zc-Major-Domain\": \"seanywell\",\n \"X-Zc-Msg-Name\": \"millService\",\n \"X-Zc-Sub-Domain\": \"milltype\",\n \"X-Zc-Seq-Id\": \"1\",\n \"X-Zc-Version\": \"1\",\n \"X-Zc-Timestamp\": str(timestamp),\n \"X-Zc-Timeout\": REQUEST_TIMEOUT,\n \"X-Zc-Nonce\": nonce,\n \"X-Zc-User-Id\": str(self._user_id),\n \"X-Zc-User-Signature\": signature,\n \"X-Zc-Content-Length\": str(len(payload)),\n }\n try:\n with async_timeout.timeout(self._timeout):\n resp = await self.websession.post(url,\n data=json.dumps(payload),\n headers=headers)\n except asyncio.TimeoutError:\n if retry < 1:\n _LOGGER.error(\"Timed out sending command to Mill: %s\", command)\n return None\n return await self.request(command, payload, retry - 1)\n except aiohttp.ClientError:\n _LOGGER.error(\"Error sending command to Mill: %s\", command, exc_info=True)\n return None\n\n result = await resp.text()\n\n _LOGGER.debug(result)\n\n if not result or result == '{\"errorCode\":0}':\n return None\n\n if 'access token expire' in result or 'invalid signature' in result:\n if retry < 1:\n return None\n if not await self.connect():\n return None\n return await self.request(command, payload, retry - 1)\n\n if '\"error\":\"device offline\"' in result:\n if retry < 1:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n _LOGGER.debug(\"Failed to send request, %s. 
Retrying...\", result)\n await asyncio.sleep(3)\n return await self.request(command, payload, retry - 1)\n\n if 'errorCode' in result:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n data = json.loads(result)\n return data\n"
] |
class Mill:
"""Class to comunicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
def sync_connect(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.connect())
loop.run_until_complete(task)
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
def sync_close_connection(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
async def get_home_list(self):
"""Request data."""
resp = await self.request("selectHomeList", "{}")
if resp is None:
return []
return resp.get('homeList', [])
async def update_rooms(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater
def sync_update_rooms(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps by name."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
async def set_room_temperatures(self, room_id, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
room = self.rooms.get(room_id)
if room is None:
_LOGGER.error("No such device")
return
room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
room.away_temp = away_temp if away_temp else room.away_temp
room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
payload = {"roomId": room_id,
"sleepTemp": room.sleep_temp,
"comfortTemp": room.comfort_temp,
"awayTemp": room.away_temp,
"homeType": 0}
await self.request("changeRoomModeTempInfo", payload)
self.rooms[room_id] = room
async def update_heaters(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
def sync_update_heaters(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
async def throttle_update_heaters(self):
"""Throttle update device."""
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
async def throttle_update_all_heaters(self):
"""Throttle update all devices and rooms."""
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
def sync_set_heater_temp(self, device_id, set_temp):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
Danielhiversen/pymill
|
mill/__init__.py
|
Mill.sync_set_heater_temp
|
python
|
def sync_set_heater_temp(self, device_id, set_temp):
loop = asyncio.get_event_loop()
task = loop.create_task(self.set_heater_temp(device_id, set_temp))
loop.run_until_complete(task)
|
Set heater temps.
|
train
|
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L384-L388
|
[
"async def set_heater_temp(self, device_id, set_temp):\n \"\"\"Set heater temp.\"\"\"\n payload = {\"homeType\": 0,\n \"timeZoneNum\": \"+02:00\",\n \"deviceId\": device_id,\n \"value\": int(set_temp),\n \"key\": \"holidayTemp\"}\n await self.request(\"changeDeviceInfo\", payload)\n"
] |
class Mill:
"""Class to comunicate with the Mill api."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self, username, password,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Mill connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self._username = username
self._password = password
self._user_id = None
self._token = None
self.rooms = {}
self.heaters = {}
self._throttle_time = None
self._throttle_all_time = None
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True
def sync_connect(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.connect())
loop.run_until_complete(task)
async def close_connection(self):
"""Close the Mill connection."""
await self.websession.close()
def sync_close_connection(self):
"""Close the Mill connection."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.close_connection())
loop.run_until_complete(task)
async def request(self, command, payload, retry=3):
"""Request data."""
# pylint: disable=too-many-return-statements
if self._token is None:
_LOGGER.error("No token")
return None
_LOGGER.debug(command, payload)
nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
url = API_ENDPOINT_2 + command
timestamp = int(time.time())
signature = hashlib.sha1(str(REQUEST_TIMEOUT
+ str(timestamp)
+ nonce
+ self._token).encode("utf-8")).hexdigest()
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
"X-Zc-Timestamp": str(timestamp),
"X-Zc-Timeout": REQUEST_TIMEOUT,
"X-Zc-Nonce": nonce,
"X-Zc-User-Id": str(self._user_id),
"X-Zc-User-Signature": signature,
"X-Zc-Content-Length": str(len(payload)),
}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Mill: %s", command)
return None
return await self.request(command, payload, retry - 1)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Mill: %s", command, exc_info=True)
return None
result = await resp.text()
_LOGGER.debug(result)
if not result or result == '{"errorCode":0}':
return None
if 'access token expire' in result or 'invalid signature' in result:
if retry < 1:
return None
if not await self.connect():
return None
return await self.request(command, payload, retry - 1)
if '"error":"device offline"' in result:
if retry < 1:
_LOGGER.error("Failed to send request, %s", result)
return None
_LOGGER.debug("Failed to send request, %s. Retrying...", result)
await asyncio.sleep(3)
return await self.request(command, payload, retry - 1)
if 'errorCode' in result:
_LOGGER.error("Failed to send request, %s", result)
return None
data = json.loads(result)
return data
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task)
async def get_home_list(self):
"""Request data."""
resp = await self.request("selectHomeList", "{}")
if resp is None:
return []
return resp.get('homeList', [])
async def update_rooms(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"}
data = await self.request("selectRoombyHome", payload)
rooms = data.get('roomInfo', [])
for _room in rooms:
_id = _room.get('roomId')
room = self.rooms.get(_id, Room())
room.room_id = _id
room.comfort_temp = _room.get("comfortTemp")
room.away_temp = _room.get("awayTemp")
room.sleep_temp = _room.get("sleepTemp")
room.name = _room.get("roomName")
room.current_mode = _room.get("currentMode")
room.heat_status = _room.get("heatStatus")
room.home_name = data.get("homeName")
room.avg_temp = _room.get("avgTemp")
self.rooms[_id] = room
payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"}
room_device = await self.request("selectDevicebyRoom", payload)
if room_device is None:
continue
heater_info = room_device.get('deviceInfo', [])
for _heater in heater_info:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
heater.independent_device = False
heater.can_change_temp = _heater.get('canChangeTemp')
heater.name = _heater.get('deviceName')
heater.room = room
self.heaters[_id] = heater
def sync_update_rooms(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_rooms())
return loop.run_until_complete(task)
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps by name."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
for room_id, _room in self.rooms.items():
if _room.name == room_name:
await self.set_room_temperatures(room_id, sleep_temp,
comfort_temp, away_temp)
return
_LOGGER.error("Could not find a room with name %s", room_name)
async def set_room_temperatures(self, room_id, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
room = self.rooms.get(room_id)
if room is None:
_LOGGER.error("No such device")
return
room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
room.away_temp = away_temp if away_temp else room.away_temp
room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
payload = {"roomId": room_id,
"sleepTemp": room.sleep_temp,
"comfortTemp": room.comfort_temp,
"awayTemp": room.away_temp,
"homeType": 0}
await self.request("changeRoomModeTempInfo", payload)
self.rooms[room_id] = room
async def update_heaters(self):
"""Request data."""
homes = await self.get_home_list()
for home in homes:
payload = {"homeId": home.get("homeId")}
data = await self.request("getIndependentDevices", payload)
if data is None:
continue
heater_data = data.get('deviceInfo', [])
if not heater_data:
continue
for _heater in heater_data:
_id = _heater.get('deviceId')
heater = self.heaters.get(_id, Heater())
heater.device_id = _id
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
for _id, heater in self.heaters.items():
if heater.independent_device:
continue
payload = {"deviceId": _id}
_heater = await self.request("selectDevice", payload)
if _heater is None:
self.heaters[_id].available = False
continue
await set_heater_values(_heater, heater)
self.heaters[_id] = heater
def sync_update_heaters(self):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_heaters())
loop.run_until_complete(task)
async def throttle_update_heaters(self):
"""Throttle update device."""
if (self._throttle_time is not None
and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_time = dt.datetime.now()
await self.update_heaters()
async def throttle_update_all_heaters(self):
"""Throttle update all devices and rooms."""
if (self._throttle_all_time is not None
and dt.datetime.now() - self._throttle_all_time
< MIN_TIME_BETWEEN_UPDATES):
return
self._throttle_all_time = dt.datetime.now()
await self.find_all_heaters()
async def update_device(self, device_id):
"""Update device."""
await self.throttle_update_heaters()
return self.heaters.get(device_id)
async def update_room(self, room_id):
"""Update room."""
await self.throttle_update_all_heaters()
return self.rooms.get(room_id)
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
def sync_heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.heater_control(device_id,
fan_status,
power_status))
loop.run_until_complete(task)
async def set_heater_temp(self, device_id, set_temp):
"""Set heater temp."""
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload)
async def find_all_heaters(self):
"""Find all heaters."""
await self.update_rooms()
await self.update_heaters()
|
internetarchive/warc
|
warc/__init__.py
|
open
|
python
|
def open(filename, mode="rb", format = None):
if format == "auto" or format == None:
format = detect_format(filename)
if format == "warc":
return WARCFile(filename, mode)
elif format == "arc":
return ARCFile(filename, mode)
else:
raise IOError("Don't know how to open '%s' files"%format)
|
Shorthand for WARCFile(filename, mode).
Auto detects file and opens it.
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/__init__.py#L24-L38
|
[
"def detect_format(filename):\n \"\"\"Tries to figure out the type of the file. Return 'warc' for\n WARC files and 'arc' for ARC files\"\"\"\n\n if \".arc\" in filename:\n return \"arc\"\n if \".warc\" in filename:\n return \"warc\"\n\n return \"unknown\"\n"
] |
"""
warc
~~~~
Python library to work with WARC files.
:copyright: (c) 2012 Internet Archive
"""
from .arc import ARCFile, ARCRecord, ARCHeader
from .warc import WARCFile, WARCRecord, WARCHeader, WARCReader
def detect_format(filename):
"""Tries to figure out the type of the file. Return 'warc' for
WARC files and 'arc' for ARC files"""
if ".arc" in filename:
return "arc"
if ".warc" in filename:
return "warc"
return "unknown"
|
internetarchive/warc
|
warc/warc.py
|
WARCHeader.init_defaults
|
python
|
def init_defaults(self):
if "WARC-Record-ID" not in self:
self['WARC-Record-ID'] = "<urn:uuid:%s>" % uuid.uuid1()
if "WARC-Date" not in self:
self['WARC-Date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
if "Content-Type" not in self:
self['Content-Type'] = WARCHeader.CONTENT_TYPES.get(self.type, "application/octet-stream")
|
Initializes important headers to default values, if not already specified.
The WARC-Record-ID header is set to a newly generated UUID.
The WARC-Date header is set to the current datetime.
The Content-Type is set based on the WARC-Type header.
The Content-Length is initialized to 0.
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/warc.py#L75-L88
| null |
class WARCHeader(CaseInsensitiveDict):
"""The WARC Header object represents the headers of a WARC record.
It provides dictionary like interface for accessing the headers.
The following mandatory fields are accessible also as attributes.
* h.record_id == h['WARC-Record-ID']
* h.content_length == int(h['Content-Length'])
* h.date == h['WARC-Date']
* h.type == h['WARC-Type']
:params headers: dictionary of headers.
:params defaults: If True, important headers like WARC-Record-ID,
WARC-Date, Content-Type and Content-Length are
initialized to automatically if not already present.
TODO:
List of attributes needed to make WARCHeaders look like ARC files
* url
* ip_address
* date (date of archival)
* content_type
* result_code (response code)
* checksum
* location
* offset (offset from beginning of file to recrod)
* filename (name of arc file)
* length (length of the n/w doc in bytes)
"""
CONTENT_TYPES = dict(warcinfo='application/warc-fields',
response='application/http; msgtype=response',
request='application/http; msgtype=request',
metadata='application/warc-fields')
KNOWN_HEADERS = {
"type": "WARC-Type",
"date": "WARC-Date",
"record_id": "WARC-Record-ID",
"ip_address": "WARC-IP-Address",
"target_uri": "WARC-Target-URI",
"warcinfo_id": "WARC-Warcinfo-ID",
"request_uri": "WARC-Request-URI",
"content_type": "Content-Type",
"content_length": "Content-Length"
}
def __init__(self, headers, defaults=False):
self.version = "WARC/1.0"
CaseInsensitiveDict.__init__(self, headers)
if defaults:
self.init_defaults()
def init_defaults(self):
"""Initializes important headers to default values, if not already specified.
The WARC-Record-ID header is set to a newly generated UUID.
The WARC-Date header is set to the current datetime.
The Content-Type is set based on the WARC-Type header.
The Content-Length is initialized to 0.
"""
if "WARC-Record-ID" not in self:
self['WARC-Record-ID'] = "<urn:uuid:%s>" % uuid.uuid1()
if "WARC-Date" not in self:
self['WARC-Date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
if "Content-Type" not in self:
self['Content-Type'] = WARCHeader.CONTENT_TYPES.get(self.type, "application/octet-stream")
def write_to(self, f):
"""Writes this header to a file, in the format specified by WARC.
"""
f.write(self.version + "\r\n")
for name, value in self.items():
name = name.title()
# Use standard forms for commonly used patterns
name = name.replace("Warc-", "WARC-").replace("-Ip-", "-IP-").replace("-Id", "-ID").replace("-Uri", "-URI")
f.write(name)
f.write(": ")
f.write(value)
f.write("\r\n")
# Header ends with an extra CRLF
f.write("\r\n")
@property
def content_length(self):
"""The Content-Length header as int."""
return int(self['Content-Length'])
@property
def type(self):
"""The value of WARC-Type header."""
return self.get('WARC-Type')
@property
def record_id(self):
"""The value of WARC-Record-ID header."""
return self['WARC-Record-ID']
@property
def date(self):
"""The value of WARC-Date header."""
return self['WARC-Date']
def __str__(self):
f = StringIO()
self.write_to(f)
return f.getvalue()
def __repr__(self):
return "<WARCHeader: type=%r, record_id=%r>" % (self.type, self.record_id)
|
internetarchive/warc
|
warc/warc.py
|
WARCHeader.write_to
|
python
|
def write_to(self, f):
f.write(self.version + "\r\n")
for name, value in self.items():
name = name.title()
# Use standard forms for commonly used patterns
name = name.replace("Warc-", "WARC-").replace("-Ip-", "-IP-").replace("-Id", "-ID").replace("-Uri", "-URI")
f.write(name)
f.write(": ")
f.write(value)
f.write("\r\n")
# Header ends with an extra CRLF
f.write("\r\n")
|
Writes this header to a file, in the format specified by WARC.
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/warc.py#L90-L104
| null |
class WARCHeader(CaseInsensitiveDict):
"""The WARC Header object represents the headers of a WARC record.
It provides dictionary like interface for accessing the headers.
The following mandatory fields are accessible also as attributes.
* h.record_id == h['WARC-Record-ID']
* h.content_length == int(h['Content-Length'])
* h.date == h['WARC-Date']
* h.type == h['WARC-Type']
:params headers: dictionary of headers.
:params defaults: If True, important headers like WARC-Record-ID,
WARC-Date, Content-Type and Content-Length are
initialized to automatically if not already present.
TODO:
List of attributes needed to make WARCHeaders look like ARC files
* url
* ip_address
* date (date of archival)
* content_type
* result_code (response code)
* checksum
* location
* offset (offset from beginning of file to recrod)
* filename (name of arc file)
* length (length of the n/w doc in bytes)
"""
CONTENT_TYPES = dict(warcinfo='application/warc-fields',
response='application/http; msgtype=response',
request='application/http; msgtype=request',
metadata='application/warc-fields')
KNOWN_HEADERS = {
"type": "WARC-Type",
"date": "WARC-Date",
"record_id": "WARC-Record-ID",
"ip_address": "WARC-IP-Address",
"target_uri": "WARC-Target-URI",
"warcinfo_id": "WARC-Warcinfo-ID",
"request_uri": "WARC-Request-URI",
"content_type": "Content-Type",
"content_length": "Content-Length"
}
def __init__(self, headers, defaults=False):
self.version = "WARC/1.0"
CaseInsensitiveDict.__init__(self, headers)
if defaults:
self.init_defaults()
def init_defaults(self):
"""Initializes important headers to default values, if not already specified.
The WARC-Record-ID header is set to a newly generated UUID.
The WARC-Date header is set to the current datetime.
The Content-Type is set based on the WARC-Type header.
The Content-Length is initialized to 0.
"""
if "WARC-Record-ID" not in self:
self['WARC-Record-ID'] = "<urn:uuid:%s>" % uuid.uuid1()
if "WARC-Date" not in self:
self['WARC-Date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
if "Content-Type" not in self:
self['Content-Type'] = WARCHeader.CONTENT_TYPES.get(self.type, "application/octet-stream")
def write_to(self, f):
"""Writes this header to a file, in the format specified by WARC.
"""
f.write(self.version + "\r\n")
for name, value in self.items():
name = name.title()
# Use standard forms for commonly used patterns
name = name.replace("Warc-", "WARC-").replace("-Ip-", "-IP-").replace("-Id", "-ID").replace("-Uri", "-URI")
f.write(name)
f.write(": ")
f.write(value)
f.write("\r\n")
# Header ends with an extra CRLF
f.write("\r\n")
@property
def content_length(self):
"""The Content-Length header as int."""
return int(self['Content-Length'])
@property
def type(self):
"""The value of WARC-Type header."""
return self.get('WARC-Type')
@property
def record_id(self):
"""The value of WARC-Record-ID header."""
return self['WARC-Record-ID']
@property
def date(self):
"""The value of WARC-Date header."""
return self['WARC-Date']
def __str__(self):
f = StringIO()
self.write_to(f)
return f.getvalue()
def __repr__(self):
return "<WARCHeader: type=%r, record_id=%r>" % (self.type, self.record_id)
|
internetarchive/warc
|
warc/warc.py
|
WARCRecord.from_response
|
python
|
def from_response(response):
    """Creates a WARCRecord from given response object.

    This must be called before reading the response.  The response can
    be read after this method is called (the body is re-attached to the
    response object below).

    :param response: An instance of :class:`requests.models.Response`.
    """
    # Get the httplib.HTTPResponse object
    http_response = response.raw._original_response
    # HTTP status line, headers and body as strings.
    # NOTE(review): the status line is hard-coded to HTTP/1.1 regardless
    # of the actual protocol version of the response.
    status_line = "HTTP/1.1 %d %s" % (http_response.status, http_response.reason)
    headers = str(http_response.msg)
    body = http_response.read()
    # Monkey-patch the response object so that it is possible to read from it later.
    response.raw._fp = StringIO(body)
    # Build the payload (raw HTTP response) to store in the warc record.
    payload = status_line + "\r\n" + headers + "\r\n" + body
    # Rebinds `headers`: from here on it is the WARC header dict, not
    # the HTTP headers string above.
    headers = {
        "WARC-Type": "response",
        "WARC-Target-URI": response.request.full_url.encode('utf-8')
    }
    return WARCRecord(payload=payload, headers=headers)
|
Creates a WARCRecord from given response object.
This must be called before reading the response. The response can be
read after this method is called.
:param response: An instance of :class:`requests.models.Response`.
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/warc.py#L216-L242
| null |
class WARCRecord(object):
"""The WARCRecord object represents a WARC Record.
"""
def __init__(self, header=None, payload=None, headers={}, defaults=True):
"""Creates a new WARC record.
"""
if header is None and defaults is True:
headers.setdefault("WARC-Type", "response")
self.header = header or WARCHeader(headers, defaults=True)
self.payload = payload
if defaults is True and 'Content-Length' not in self.header:
if payload:
self.header['Content-Length'] = str(len(payload))
else:
self.header['Content-Length'] = "0"
if defaults is True and 'WARC-Payload-Digest' not in self.header:
self.header['WARC-Payload-Digest'] = self._compute_digest(payload)
def _compute_digest(self, payload):
return "sha1:" + hashlib.sha1(payload).hexdigest()
def write_to(self, f):
self.header.write_to(f)
f.write(self.payload)
f.write("\r\n")
f.write("\r\n")
f.flush()
@property
def type(self):
"""Record type"""
return self.header.type
@property
def url(self):
"""The value of the WARC-Target-URI header if the record is of type "response"."""
return self.header.get('WARC-Target-URI')
@property
def ip_address(self):
"""The IP address of the host contacted to retrieve the content of this record.
This value is available from the WARC-IP-Address header."""
return self.header.get('WARC-IP-Address')
@property
def date(self):
"""UTC timestamp of the record."""
return self.header.get("WARC-Date")
@property
def checksum(self):
return self.header.get('WARC-Payload-Digest')
@property
def offset(self):
"""Offset of this record in the warc file from which this record is read.
"""
pass
def __getitem__(self, name):
return self.header[name]
def __setitem__(self, name, value):
self.header[name] = value
def __contains__(self, name):
return name in self.header
def __str__(self):
f = StringIO()
self.write_to(f)
return f.getvalue()
def __repr__(self):
return "<WARCRecord: type=%r record_id=%s>" % (self.type, self['WARC-Record-ID'])
@staticmethod
def from_response(response):
"""Creates a WARCRecord from given response object.
This must be called before reading the response. The response can be
read after this method is called.
:param response: An instance of :class:`requests.models.Response`.
"""
# Get the httplib.HTTPResponse object
http_response = response.raw._original_response
# HTTP status line, headers and body as strings
status_line = "HTTP/1.1 %d %s" % (http_response.status, http_response.reason)
headers = str(http_response.msg)
body = http_response.read()
# Monkey-patch the response object so that it is possible to read from it later.
response.raw._fp = StringIO(body)
# Build the payload to create warc file.
payload = status_line + "\r\n" + headers + "\r\n" + body
headers = {
"WARC-Type": "response",
"WARC-Target-URI": response.request.full_url.encode('utf-8')
}
return WARCRecord(payload=payload, headers=headers)
|
internetarchive/warc
|
warc/warc.py
|
WARCFile.write_record
|
python
|
def write_record(self, warc_record):
    """Adds a warc record to this WARC file.

    :param warc_record: the WARCRecord to append to this file.
    """
    warc_record.write_to(self.fileobj)
    # Each warc record is written as a separate member in the gzip file
    # so that each record can be read independently.
    if isinstance(self.fileobj, gzip2.GzipFile):
        self.fileobj.close_member()
|
Adds a warc record to this WARC file.
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/warc.py#L265-L272
|
[
"def close_member(self):\n \"\"\"Closes the current member being written.\n \"\"\"\n # The new member is not yet started, no need to close\n if self._new_member:\n return\n\n self.fileobj.write(self.compress.flush())\n write32u(self.fileobj, self.crc)\n # self.size may exceed 2GB, or even 4GB\n write32u(self.fileobj, self.size & 0xffffffffL)\n self.size = 0\n self.compress = zlib.compressobj(9,\n zlib.DEFLATED,\n -zlib.MAX_WBITS,\n zlib.DEF_MEM_LEVEL,\n 0)\n self._new_member = True\n",
"def write_to(self, f):\n self.header.write_to(f)\n f.write(self.payload)\n f.write(\"\\r\\n\")\n f.write(\"\\r\\n\")\n f.flush()\n"
] |
class WARCFile:
def __init__(self, filename=None, mode=None, fileobj=None, compress=None):
if fileobj is None:
fileobj = __builtin__.open(filename, mode or "rb")
mode = fileobj.mode
# initiaize compress based on filename, if not already specified
if compress is None and filename and filename.endswith(".gz"):
compress = True
if compress:
fileobj = gzip2.GzipFile(fileobj=fileobj, mode=mode)
self.fileobj = fileobj
self._reader = None
@property
def reader(self):
if self._reader is None:
self._reader = WARCReader(self.fileobj)
return self._reader
def read_record(self):
"""Reads a warc record from this WARC file."""
return self.reader.read_record()
def __iter__(self):
return iter(self.reader)
def close(self):
self.fileobj.close()
def browse(self):
"""Utility to browse through the records in the warc file.
This returns an iterator over (record, offset, size) for each record in
the file. If the file is gzip compressed, the offset and size will
corresponds to the compressed file.
The payload of each record is limited to 1MB to keep memory consumption
under control.
"""
offset = 0
for record in self.reader:
# Just read the first 1MB of the payload.
# This will make sure memory consuption is under control and it
# is possible to look at the first MB of the payload, which is
# typically sufficient to read http headers in the payload.
record.payload = StringIO(record.payload.read(1024*1024))
self.reader.finish_reading_current_record()
next_offset = self.tell()
yield record, offset, next_offset-offset
offset = next_offset
def tell(self):
"""Returns the file offset. If this is a compressed file, then the
offset in the compressed file is returned.
"""
if isinstance(self.fileobj, gzip2.GzipFile):
return self.fileobj.fileobj.tell()
else:
return self.fileobj.tell()
|
internetarchive/warc
|
warc/warc.py
|
WARCFile.browse
|
python
|
def browse(self):
    """Utility to browse through the records in the warc file.

    This returns an iterator over (record, offset, size) for each record
    in the file.  If the file is gzip compressed, the offset and size
    correspond to the compressed file.

    The payload of each record is limited to 1MB to keep memory
    consumption under control.
    """
    offset = 0
    for record in self.reader:
        # Just read the first 1MB of the payload.
        # This will make sure memory consumption is under control and it
        # is possible to look at the first MB of the payload, which is
        # typically sufficient to read http headers in the payload.
        record.payload = StringIO(record.payload.read(1024*1024))
        self.reader.finish_reading_current_record()
        next_offset = self.tell()
        yield record, offset, next_offset-offset
        offset = next_offset
|
Utility to browse through the records in the warc file.
This returns an iterator over (record, offset, size) for each record in
the file. If the file is gzip compressed, the offset and size will
corresponds to the compressed file.
The payload of each record is limited to 1MB to keep memory consumption
under control.
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/warc.py#L284-L304
|
[
"def tell(self):\n \"\"\"Returns the file offset. If this is a compressed file, then the \n offset in the compressed file is returned.\n \"\"\"\n if isinstance(self.fileobj, gzip2.GzipFile):\n return self.fileobj.fileobj.tell()\n else:\n return self.fileobj.tell() \n"
] |
class WARCFile:
def __init__(self, filename=None, mode=None, fileobj=None, compress=None):
if fileobj is None:
fileobj = __builtin__.open(filename, mode or "rb")
mode = fileobj.mode
# initiaize compress based on filename, if not already specified
if compress is None and filename and filename.endswith(".gz"):
compress = True
if compress:
fileobj = gzip2.GzipFile(fileobj=fileobj, mode=mode)
self.fileobj = fileobj
self._reader = None
@property
def reader(self):
if self._reader is None:
self._reader = WARCReader(self.fileobj)
return self._reader
def write_record(self, warc_record):
"""Adds a warc record to this WARC file.
"""
warc_record.write_to(self.fileobj)
# Each warc record is written as separate member in the gzip file
# so that each record can be read independetly.
if isinstance(self.fileobj, gzip2.GzipFile):
self.fileobj.close_member()
def read_record(self):
"""Reads a warc record from this WARC file."""
return self.reader.read_record()
def __iter__(self):
return iter(self.reader)
def close(self):
self.fileobj.close()
def browse(self):
"""Utility to browse through the records in the warc file.
This returns an iterator over (record, offset, size) for each record in
the file. If the file is gzip compressed, the offset and size will
corresponds to the compressed file.
The payload of each record is limited to 1MB to keep memory consumption
under control.
"""
offset = 0
for record in self.reader:
# Just read the first 1MB of the payload.
# This will make sure memory consuption is under control and it
# is possible to look at the first MB of the payload, which is
# typically sufficient to read http headers in the payload.
record.payload = StringIO(record.payload.read(1024*1024))
self.reader.finish_reading_current_record()
next_offset = self.tell()
yield record, offset, next_offset-offset
offset = next_offset
def tell(self):
"""Returns the file offset. If this is a compressed file, then the
offset in the compressed file is returned.
"""
if isinstance(self.fileobj, gzip2.GzipFile):
return self.fileobj.fileobj.tell()
else:
return self.fileobj.tell()
|
internetarchive/warc
|
warc/warc.py
|
WARCFile.tell
|
python
|
def tell(self):
    """Returns the file offset.  If this is a compressed file, the
    offset in the underlying compressed file is returned.
    """
    target = self.fileobj
    # For a gzip-wrapped file, report the position of the raw
    # (compressed) stream underneath the wrapper.
    if isinstance(target, gzip2.GzipFile):
        target = target.fileobj
    return target.tell()
|
Returns the file offset. If this is a compressed file, then the
offset in the compressed file is returned.
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/warc.py#L306-L313
| null |
class WARCFile:
def __init__(self, filename=None, mode=None, fileobj=None, compress=None):
if fileobj is None:
fileobj = __builtin__.open(filename, mode or "rb")
mode = fileobj.mode
# initiaize compress based on filename, if not already specified
if compress is None and filename and filename.endswith(".gz"):
compress = True
if compress:
fileobj = gzip2.GzipFile(fileobj=fileobj, mode=mode)
self.fileobj = fileobj
self._reader = None
@property
def reader(self):
if self._reader is None:
self._reader = WARCReader(self.fileobj)
return self._reader
def write_record(self, warc_record):
"""Adds a warc record to this WARC file.
"""
warc_record.write_to(self.fileobj)
# Each warc record is written as separate member in the gzip file
# so that each record can be read independetly.
if isinstance(self.fileobj, gzip2.GzipFile):
self.fileobj.close_member()
def read_record(self):
"""Reads a warc record from this WARC file."""
return self.reader.read_record()
def __iter__(self):
return iter(self.reader)
def close(self):
self.fileobj.close()
def browse(self):
"""Utility to browse through the records in the warc file.
This returns an iterator over (record, offset, size) for each record in
the file. If the file is gzip compressed, the offset and size will
corresponds to the compressed file.
The payload of each record is limited to 1MB to keep memory consumption
under control.
"""
offset = 0
for record in self.reader:
# Just read the first 1MB of the payload.
# This will make sure memory consuption is under control and it
# is possible to look at the first MB of the payload, which is
# typically sufficient to read http headers in the payload.
record.payload = StringIO(record.payload.read(1024*1024))
self.reader.finish_reading_current_record()
next_offset = self.tell()
yield record, offset, next_offset-offset
offset = next_offset
|
internetarchive/warc
|
warc/gzip2.py
|
GzipFile.close_member
|
python
|
def close_member(self):
    """Closes the current member being written.

    Flushes the compressor and writes the gzip member trailer (CRC32
    followed by the uncompressed size), then resets the compressor and
    size counter so a fresh member can be started by the next write.
    """
    # The new member is not yet started, no need to close
    if self._new_member:
        return
    self.fileobj.write(self.compress.flush())
    write32u(self.fileobj, self.crc)
    # self.size may exceed 2GB, or even 4GB; the gzip trailer stores
    # the size modulo 2**32.
    write32u(self.fileobj, self.size & 0xffffffffL)
    self.size = 0
    self.compress = zlib.compressobj(9,
                                     zlib.DEFLATED,
                                     -zlib.MAX_WBITS,
                                     zlib.DEF_MEM_LEVEL,
                                     0)
    self._new_member = True
|
Closes the current member being written.
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/gzip2.py#L42-L59
| null |
class GzipFile(BaseGzipFile):
"""GzipFile with support for multi-member gzip files.
"""
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None):
BaseGzipFile.__init__(self,
filename=filename,
mode=mode,
compresslevel=compresslevel,
fileobj=fileobj)
if self.mode == WRITE:
# Indicates the start of a new member if value is True.
# The BaseGzipFile constructor already wrote the header for new
# member, so marking as False.
self._new_member = False
# When _member_lock is True, only one member in gzip file is read
self._member_lock = False
def close_member(self):
"""Closes the current member being written.
"""
# The new member is not yet started, no need to close
if self._new_member:
return
self.fileobj.write(self.compress.flush())
write32u(self.fileobj, self.crc)
# self.size may exceed 2GB, or even 4GB
write32u(self.fileobj, self.size & 0xffffffffL)
self.size = 0
self.compress = zlib.compressobj(9,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
self._new_member = True
def _start_member(self):
"""Starts writing a new member if required.
"""
if self._new_member:
self._init_write(self.name)
self._write_gzip_header()
self._new_member = False
def write(self, data):
self._start_member()
BaseGzipFile.write(self, data)
def close(self):
"""Closes the gzip with care to handle multiple members.
"""
if self.fileobj is None:
return
if self.mode == WRITE:
self.close_member()
self.fileobj = None
elif self.mode == READ:
self.fileobj = None
if self.myfileobj:
self.myfileobj.close()
self.myfileobj = None
def _read(self, size):
# Treat end of member as end of file when _member_lock flag is set
if self._member_lock and self._new_member:
raise EOFError()
else:
return BaseGzipFile._read(self, size)
def read_member(self):
"""Returns a file-like object to read one member from the gzip file.
"""
if self._member_lock is False:
self._member_lock = True
if self._new_member:
try:
# Read one byte to move to the next member
BaseGzipFile._read(self, 1)
assert self._new_member is False
except EOFError:
return None
return self
def write_member(self, data):
"""Writes the given data as one gzip member.
The data can be a string, an iterator that gives strings or a file-like object.
"""
if isinstance(data, basestring):
self.write(data)
else:
for text in data:
self.write(text)
self.close_member()
|
internetarchive/warc
|
warc/gzip2.py
|
GzipFile._start_member
|
python
|
def _start_member(self):
    """Starts writing a new member if required.

    If a member boundary is pending, writes the gzip header for the new
    member and clears the pending flag; otherwise does nothing.
    """
    if not self._new_member:
        return
    self._init_write(self.name)
    self._write_gzip_header()
    self._new_member = False
|
Starts writing a new member if required.
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/gzip2.py#L61-L67
| null |
class GzipFile(BaseGzipFile):
"""GzipFile with support for multi-member gzip files.
"""
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None):
BaseGzipFile.__init__(self,
filename=filename,
mode=mode,
compresslevel=compresslevel,
fileobj=fileobj)
if self.mode == WRITE:
# Indicates the start of a new member if value is True.
# The BaseGzipFile constructor already wrote the header for new
# member, so marking as False.
self._new_member = False
# When _member_lock is True, only one member in gzip file is read
self._member_lock = False
def close_member(self):
"""Closes the current member being written.
"""
# The new member is not yet started, no need to close
if self._new_member:
return
self.fileobj.write(self.compress.flush())
write32u(self.fileobj, self.crc)
# self.size may exceed 2GB, or even 4GB
write32u(self.fileobj, self.size & 0xffffffffL)
self.size = 0
self.compress = zlib.compressobj(9,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
self._new_member = True
def write(self, data):
self._start_member()
BaseGzipFile.write(self, data)
def close(self):
"""Closes the gzip with care to handle multiple members.
"""
if self.fileobj is None:
return
if self.mode == WRITE:
self.close_member()
self.fileobj = None
elif self.mode == READ:
self.fileobj = None
if self.myfileobj:
self.myfileobj.close()
self.myfileobj = None
def _read(self, size):
# Treat end of member as end of file when _member_lock flag is set
if self._member_lock and self._new_member:
raise EOFError()
else:
return BaseGzipFile._read(self, size)
def read_member(self):
"""Returns a file-like object to read one member from the gzip file.
"""
if self._member_lock is False:
self._member_lock = True
if self._new_member:
try:
# Read one byte to move to the next member
BaseGzipFile._read(self, 1)
assert self._new_member is False
except EOFError:
return None
return self
def write_member(self, data):
"""Writes the given data as one gzip member.
The data can be a string, an iterator that gives strings or a file-like object.
"""
if isinstance(data, basestring):
self.write(data)
else:
for text in data:
self.write(text)
self.close_member()
|
internetarchive/warc
|
warc/gzip2.py
|
GzipFile.close
|
python
|
def close(self):
    """Closes the gzip with care to handle multiple members.

    In write mode the currently open member is finalized before the
    file object reference is dropped.  Safe to call more than once:
    returns immediately when already closed.
    """
    if self.fileobj is None:
        return
    if self.mode == WRITE:
        # Finish the pending member so the trailer (CRC/size) is written.
        self.close_member()
        self.fileobj = None
    elif self.mode == READ:
        self.fileobj = None
    # Close the underlying file only if this object opened it itself.
    if self.myfileobj:
        self.myfileobj.close()
        self.myfileobj = None
|
Closes the gzip with care to handle multiple members.
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/gzip2.py#L73-L86
|
[
"def close_member(self):\n \"\"\"Closes the current member being written.\n \"\"\"\n # The new member is not yet started, no need to close\n if self._new_member:\n return\n\n self.fileobj.write(self.compress.flush())\n write32u(self.fileobj, self.crc)\n # self.size may exceed 2GB, or even 4GB\n write32u(self.fileobj, self.size & 0xffffffffL)\n self.size = 0\n self.compress = zlib.compressobj(9,\n zlib.DEFLATED,\n -zlib.MAX_WBITS,\n zlib.DEF_MEM_LEVEL,\n 0)\n self._new_member = True\n"
] |
class GzipFile(BaseGzipFile):
"""GzipFile with support for multi-member gzip files.
"""
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None):
BaseGzipFile.__init__(self,
filename=filename,
mode=mode,
compresslevel=compresslevel,
fileobj=fileobj)
if self.mode == WRITE:
# Indicates the start of a new member if value is True.
# The BaseGzipFile constructor already wrote the header for new
# member, so marking as False.
self._new_member = False
# When _member_lock is True, only one member in gzip file is read
self._member_lock = False
def close_member(self):
"""Closes the current member being written.
"""
# The new member is not yet started, no need to close
if self._new_member:
return
self.fileobj.write(self.compress.flush())
write32u(self.fileobj, self.crc)
# self.size may exceed 2GB, or even 4GB
write32u(self.fileobj, self.size & 0xffffffffL)
self.size = 0
self.compress = zlib.compressobj(9,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
self._new_member = True
def _start_member(self):
"""Starts writing a new member if required.
"""
if self._new_member:
self._init_write(self.name)
self._write_gzip_header()
self._new_member = False
def write(self, data):
self._start_member()
BaseGzipFile.write(self, data)
def close(self):
"""Closes the gzip with care to handle multiple members.
"""
if self.fileobj is None:
return
if self.mode == WRITE:
self.close_member()
self.fileobj = None
elif self.mode == READ:
self.fileobj = None
if self.myfileobj:
self.myfileobj.close()
self.myfileobj = None
def _read(self, size):
# Treat end of member as end of file when _member_lock flag is set
if self._member_lock and self._new_member:
raise EOFError()
else:
return BaseGzipFile._read(self, size)
def read_member(self):
"""Returns a file-like object to read one member from the gzip file.
"""
if self._member_lock is False:
self._member_lock = True
if self._new_member:
try:
# Read one byte to move to the next member
BaseGzipFile._read(self, 1)
assert self._new_member is False
except EOFError:
return None
return self
def write_member(self, data):
"""Writes the given data as one gzip member.
The data can be a string, an iterator that gives strings or a file-like object.
"""
if isinstance(data, basestring):
self.write(data)
else:
for text in data:
self.write(text)
self.close_member()
|
internetarchive/warc
|
warc/gzip2.py
|
GzipFile.read_member
|
python
|
def read_member(self):
    """Returns a file-like object to read one member from the gzip file.

    Returns ``self`` (reads will stop at the member boundary once the
    member lock is set), or ``None`` when there are no more members.
    """
    if self._member_lock is False:
        self._member_lock = True
    if self._new_member:
        try:
            # Read one byte to move past the boundary into the next
            # member; EOFError here means no member follows.
            BaseGzipFile._read(self, 1)
            assert self._new_member is False
        except EOFError:
            return None
    return self
|
Returns a file-like object to read one member from the gzip file.
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/gzip2.py#L95-L109
| null |
class GzipFile(BaseGzipFile):
"""GzipFile with support for multi-member gzip files.
"""
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None):
BaseGzipFile.__init__(self,
filename=filename,
mode=mode,
compresslevel=compresslevel,
fileobj=fileobj)
if self.mode == WRITE:
# Indicates the start of a new member if value is True.
# The BaseGzipFile constructor already wrote the header for new
# member, so marking as False.
self._new_member = False
# When _member_lock is True, only one member in gzip file is read
self._member_lock = False
def close_member(self):
"""Closes the current member being written.
"""
# The new member is not yet started, no need to close
if self._new_member:
return
self.fileobj.write(self.compress.flush())
write32u(self.fileobj, self.crc)
# self.size may exceed 2GB, or even 4GB
write32u(self.fileobj, self.size & 0xffffffffL)
self.size = 0
self.compress = zlib.compressobj(9,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
self._new_member = True
def _start_member(self):
"""Starts writing a new member if required.
"""
if self._new_member:
self._init_write(self.name)
self._write_gzip_header()
self._new_member = False
def write(self, data):
self._start_member()
BaseGzipFile.write(self, data)
def close(self):
"""Closes the gzip with care to handle multiple members.
"""
if self.fileobj is None:
return
if self.mode == WRITE:
self.close_member()
self.fileobj = None
elif self.mode == READ:
self.fileobj = None
if self.myfileobj:
self.myfileobj.close()
self.myfileobj = None
def _read(self, size):
# Treat end of member as end of file when _member_lock flag is set
if self._member_lock and self._new_member:
raise EOFError()
else:
return BaseGzipFile._read(self, size)
def read_member(self):
"""Returns a file-like object to read one member from the gzip file.
"""
if self._member_lock is False:
self._member_lock = True
if self._new_member:
try:
# Read one byte to move to the next member
BaseGzipFile._read(self, 1)
assert self._new_member is False
except EOFError:
return None
return self
def write_member(self, data):
"""Writes the given data as one gzip member.
The data can be a string, an iterator that gives strings or a file-like object.
"""
if isinstance(data, basestring):
self.write(data)
else:
for text in data:
self.write(text)
self.close_member()
|
internetarchive/warc
|
warc/gzip2.py
|
GzipFile.write_member
|
python
|
def write_member(self, data):
    """Writes the given data as one gzip member.

    The data can be a string, an iterator that gives strings or a
    file-like object (anything iterable that yields strings).
    """
    if isinstance(data, basestring):
        self.write(data)
    else:
        for text in data:
            self.write(text)
    self.close_member()
|
Writes the given data as one gzip member.
The data can be a string, an iterator that gives strings or a file-like object.
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/gzip2.py#L111-L121
|
[
"def write(self, data):\n self._start_member()\n BaseGzipFile.write(self, data)\n",
"def close_member(self):\n \"\"\"Closes the current member being written.\n \"\"\"\n # The new member is not yet started, no need to close\n if self._new_member:\n return\n\n self.fileobj.write(self.compress.flush())\n write32u(self.fileobj, self.crc)\n # self.size may exceed 2GB, or even 4GB\n write32u(self.fileobj, self.size & 0xffffffffL)\n self.size = 0\n self.compress = zlib.compressobj(9,\n zlib.DEFLATED,\n -zlib.MAX_WBITS,\n zlib.DEF_MEM_LEVEL,\n 0)\n self._new_member = True\n"
] |
class GzipFile(BaseGzipFile):
"""GzipFile with support for multi-member gzip files.
"""
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None):
BaseGzipFile.__init__(self,
filename=filename,
mode=mode,
compresslevel=compresslevel,
fileobj=fileobj)
if self.mode == WRITE:
# Indicates the start of a new member if value is True.
# The BaseGzipFile constructor already wrote the header for new
# member, so marking as False.
self._new_member = False
# When _member_lock is True, only one member in gzip file is read
self._member_lock = False
def close_member(self):
"""Closes the current member being written.
"""
# The new member is not yet started, no need to close
if self._new_member:
return
self.fileobj.write(self.compress.flush())
write32u(self.fileobj, self.crc)
# self.size may exceed 2GB, or even 4GB
write32u(self.fileobj, self.size & 0xffffffffL)
self.size = 0
self.compress = zlib.compressobj(9,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
self._new_member = True
def _start_member(self):
"""Starts writing a new member if required.
"""
if self._new_member:
self._init_write(self.name)
self._write_gzip_header()
self._new_member = False
def write(self, data):
self._start_member()
BaseGzipFile.write(self, data)
def close(self):
"""Closes the gzip with care to handle multiple members.
"""
if self.fileobj is None:
return
if self.mode == WRITE:
self.close_member()
self.fileobj = None
elif self.mode == READ:
self.fileobj = None
if self.myfileobj:
self.myfileobj.close()
self.myfileobj = None
def _read(self, size):
# Treat end of member as end of file when _member_lock flag is set
if self._member_lock and self._new_member:
raise EOFError()
else:
return BaseGzipFile._read(self, size)
def read_member(self):
"""Returns a file-like object to read one member from the gzip file.
"""
if self._member_lock is False:
self._member_lock = True
if self._new_member:
try:
# Read one byte to move to the next member
BaseGzipFile._read(self, 1)
assert self._new_member is False
except EOFError:
return None
return self
def write_member(self, data):
"""Writes the given data as one gzip member.
The data can be a string, an iterator that gives strings or a file-like object.
"""
if isinstance(data, basestring):
self.write(data)
else:
for text in data:
self.write(text)
self.close_member()
|
internetarchive/warc
|
warc/arc.py
|
ARCHeader.write_to
|
python
|
def write_to(self, f, version = None):
    """Writes out the arc header to the file like object `f`.

    If the version field is 1, it writes out an arc v1 header,
    otherwise (and this is default), it outputs a v2 header.

    :param f: file-like object to write the header line to.
    :param version: 1 or 2; defaults to this header's own version.
    :raises ValueError: if the resolved version is neither 1 nor 2.
        (Previously an unsupported version crashed with a confusing
        UnboundLocalError because no template was ever assigned.)
    """
    if not version:
        version = self.version
    if version == 1:
        # v1 headers carry only the five basic fields.
        header = "%(url)s %(ip_address)s %(date)s %(content_type)s %(length)s"
    elif version == 2:
        header = ("%(url)s %(ip_address)s %(date)s %(content_type)s "
                  "%(result_code)s %(checksum)s %(location)s %(offset)s "
                  "%(filename)s %(length)s")
    else:
        raise ValueError("Unsupported ARC header version: %r" % (version,))
    header = header % dict(url = self['url'],
                           ip_address = self['ip_address'],
                           date = self['date'],
                           content_type = self['content_type'],
                           result_code = self['result_code'],
                           checksum = self['checksum'],
                           location = self['location'],
                           offset = self['offset'],
                           filename = self['filename'],
                           length = self['length'])
    f.write(header)
|
Writes out the arc header to the file like object `f`.
If the version field is 1, it writes out an arc v1 header,
otherwise (and this is default), it outputs a v2 header.
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L69-L94
| null |
class ARCHeader(CaseInsensitiveDict):
    """
    Holds fields from an ARC V1 or V2 header.

    V1 header fields are

        * url
        * ip_address
        * date
        * content_type
        * length (length of the n/w doc in bytes)

    V2 header fields are

        * url
        * ip_address
        * date (date of archival)
        * content_type
        * result_code (response code)
        * checksum
        * location
        * offset (offset from beginning of file to record)
        * filename (name of arc file)
        * length (length of the n/w doc in bytes)
    """
    def __init__(self, url = "", ip_address = "", date = "", content_type = "",
                 result_code = "", checksum = "", location = "", offset = "", filename = "", length = "", version = 2):
        # Normalise the date to the ARC on-disk representation
        # (YYYYmmddHHMMSS); reject strings that don't already match it.
        if isinstance(date, datetime.datetime):
            date = date.strftime("%Y%m%d%H%M%S")
        else:
            try:
                datetime.datetime.strptime(date, "%Y%m%d%H%M%S")
            except ValueError:
                raise ValueError("Couldn't parse the date '%s' in file header"%date)

        self.version = version

        CaseInsensitiveDict.__init__(self,
                                     url = url,
                                     ip_address = ip_address,
                                     date = date,
                                     content_type = content_type,
                                     result_code = result_code,
                                     checksum = checksum,
                                     location = location,
                                     offset = offset,
                                     filename = filename,
                                     length = length)

    @property
    def url(self):
        return self["url"]

    @property
    def ip_address(self):
        return self["ip_address"]

    @property
    def date(self):
        # Stored internally as a string; exposed as a datetime for callers.
        return datetime.datetime.strptime(self['date'], "%Y%m%d%H%M%S")

    @property
    def content_type(self):
        return self["content_type"]

    @property
    def result_code(self):
        return self["result_code"]

    @property
    def checksum(self):
        return self["checksum"]

    @property
    def location(self):
        return self["location"]

    @property
    def offset(self):
        return int(self["offset"])

    @property
    def filename(self):
        return self["filename"]

    @property
    def length(self):
        return int(self["length"])

    def __str__(self):
        # write_to() serialises the header in ARC wire format; it is part of
        # this class's full definition elsewhere in the module.
        f = StringIO.StringIO()
        self.write_to(f)
        return f.getvalue()

    def __repr__(self):
        f = {}
        # BUG FIX: "content_type" and "result_code" were fused into one token
        # ("content_typeresult_code"), so neither field ever appeared here.
        for i in "url ip_address date content_type result_code checksum location offset filename length".split():
            if hasattr(self, i):
                f[i] = getattr(self, i)
        s = ['%s = "%s"'%(k, v) for k, v in f.iteritems()]
        s = ", ".join(s)
        return "<ARCHeader(%s)>"%s
|
internetarchive/warc
|
warc/arc.py
|
ARCRecord.from_string
|
python
|
def from_string(cls, string, version):
    """
    Constructs an ARC record from a string and returns it.

    TODO: It might be best to merge this with the _read_arc_record
    function rather than reimplement the functionality here.
    """
    header, payload = string.split("\n", 1)
    # BUG FIX: indexing payload[0] raised IndexError for a record whose
    # payload was empty; startswith() is safe on the empty string.
    if payload.startswith('\n'):  # Skip the extra separator line.
        payload = payload[1:]
    # Select the header pattern for this ARC version.
    if int(version) == 1:
        arc_header_re = ARC1_HEADER_RE
    elif int(version) == 2:
        arc_header_re = ARC2_HEADER_RE
    matches = arc_header_re.search(header)
    headers = matches.groupdict()
    arc_header = ARCHeader(**headers)
    return cls(header = arc_header, payload = payload, version = version)
|
Constructs an ARC record from a string and returns it.
TODO: It might be best to merge this with the _read_arc_record
function rather than reimplement the functionality here.
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L161-L179
| null |
class ARCRecord(object):
    """A single ARC record: a header plus the archived payload."""

    def __init__(self, header = None, payload = None, headers = {}, version = None):
        if not (header or headers):
            raise TypeError("Can't write create an ARC1 record without a header")
        self.header = header or ARCHeader(version = version, **headers)
        self.payload = payload
        self.version = version

    def write_to(self, f, version = None):
        """Writes this record (header, blank line, payload) to file object f.

        BUG FIX: a stray @classmethod decorator (left over from a removed
        alternate constructor) made this instance method uncallable on
        instances -- `self` was bound to the class instead of the record.
        """
        version = version or self.version or 2
        self.header.write_to(f, version)
        f.write("\n") # This separates the header and the body
        if isinstance(self.payload, str): # Usually used for small payloads
            f.write(self.payload)
        elif hasattr(self.payload, "read"): # Large payloads arrive as file-like objects
            chunk_size = 10 * 1024 * 1024 # Stream 10MB at a time
            d = self.payload.read(chunk_size)
            while d:
                f.write(d)
                d = self.payload.read(chunk_size)
            f.write("\n")

    def __getitem__(self, name):
        # Delegate field access to the header.
        return self.header[name]

    def __setitem__(self, name, value):
        self.header[name] = value

    def __str__(self):
        f = StringIO.StringIO()
        self.write_to(f)
        return f.getvalue()
|
internetarchive/warc
|
warc/arc.py
|
ARCFile._write_header
|
python
|
def _write_header(self):
    """Writes the leading filedesc:// record that opens an ARC file."""
    hdrs = self.file_headers
    # Fill in (with a warning) any file-level headers the caller omitted.
    if "org" not in hdrs:
        warnings.warn("Using 'unknown' for Archiving organisation name")
        hdrs['org'] = "Unknown"
    if "date" not in hdrs:
        now = datetime.datetime.utcnow()
        warnings.warn("Using '%s' for Archiving time"%now)
        hdrs['date'] = now
    if "ip_address" not in hdrs:
        warnings.warn("Using '127.0.0.1' as IP address of machine that's archiving")
        hdrs['ip_address'] = "127.0.0.1"

    # The payload of the filedesc record is the version line plus the
    # column-name line for that version's record headers.
    if self.version == 1:
        payload = ("1 0 %(org)s\nURL IP-address Archive-date "
                   "Content-type Archive-length"%dict(org = hdrs['org']))
    elif self.version == 2:
        payload = ("2 0 %(org)s\nURL IP-address Archive-date Content-type "
                   "Result-code Checksum Location Offset Filename Archive-length")
    else:
        raise IOError("Can't write an ARC file with version '\"%s\"'"%self.version)

    arc_fname = os.path.basename(self.fileobj.name)
    file_header = ARCHeader(url = "filedesc://%s"%arc_fname,
                            ip_address = hdrs['ip_address'],
                            date = hdrs['date'],
                            content_type = "text/plain",
                            length = len(payload),
                            result_code = "200",
                            checksum = "-",
                            location = "-",
                            offset = str(self.fileobj.tell()),
                            filename = arc_fname)
    self.write(ARCRecord(file_header, payload%hdrs))
|
Writes out an ARC header
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L264-L295
|
[
"def write(self, arc_record):\n \"Writes out the given arc record to the file\"\n if not self.version:\n self.version = 2\n if not self.header_written:\n self.header_written = True\n self._write_header()\n arc_record.write_to(self.fileobj, self.version)\n self.fileobj.write(\"\\n\") # Record separator\n"
] |
class ARCFile(object):
def __init__(self, filename=None, mode=None, fileobj=None, version = None, file_headers = {}):
"""
Initialises a file like object that can be used to read or
write Arc files. Works for both version 1 or version 2.
This can be called similar to the builtin `file` constructor.
It can also just be given a fileobj which is a file like
object that it will use directly for its work.
The file_headers should contain the following fields used to
create the header for the file. The exact fields used depends
on whether v1 or v2 files are being created. If a read is
done, the headers will be autopopulated from the first record.
* ip_address - IP address of the machine doing the Archiving
* date - Date of archival
* org - Organisation that's doing the Archiving.
The version parameter tries to work intuitively as follows
* If version is set to 'n' (where n is 1 or 2), the
library configures itself to read and write version n
ARC files.
* When we try to write a record, it will generate
and write a version n record.
* When we try to read a record, it will attempt to
parse it as a version n record and will error out
if the format is different.
* If the version is unspecified, the library will
configures itself as follows
* When we try to write a record, it will generate
and write a version 2 record.
* When we try to read a record, it will read out one
record and try to guess the version from it (for
the first read).
"""
if fileobj is None:
fileobj = __builtin__.open(filename, mode or "rb")
self.fileobj = fileobj
if version != None and int(version) not in (1, 2):
raise TypeError("ARC version has to be 1 or 2")
self.version = version
self.file_headers = file_headers
self.header_written = False
self.header_read = False
def _write_header(self):
"Writes out an ARC header"
if "org" not in self.file_headers:
warnings.warn("Using 'unknown' for Archiving organisation name")
self.file_headers['org'] = "Unknown"
if "date" not in self.file_headers:
now = datetime.datetime.utcnow()
warnings.warn("Using '%s' for Archiving time"%now)
self.file_headers['date'] = now
if "ip_address" not in self.file_headers:
warnings.warn("Using '127.0.0.1' as IP address of machine that's archiving")
self.file_headers['ip_address'] = "127.0.0.1"
if self.version == 1:
payload = "1 0 %(org)s\nURL IP-address Archive-date Content-type Archive-length"%dict(org = self.file_headers['org'])
elif self.version == 2:
payload = "2 0 %(org)s\nURL IP-address Archive-date Content-type Result-code Checksum Location Offset Filename Archive-length"
else:
raise IOError("Can't write an ARC file with version '\"%s\"'"%self.version)
fname = os.path.basename(self.fileobj.name)
header = ARCHeader(url = "filedesc://%s"%fname,
ip_address = self.file_headers['ip_address'],
date = self.file_headers['date'],
content_type = "text/plain",
length = len(payload),
result_code = "200",
checksum = "-",
location = "-",
offset = str(self.fileobj.tell()),
filename = fname)
arc_file_header_record = ARCRecord(header, payload%self.file_headers)
self.write(arc_file_header_record)
def write(self, arc_record):
"Writes out the given arc record to the file"
if not self.version:
self.version = 2
if not self.header_written:
self.header_written = True
self._write_header()
arc_record.write_to(self.fileobj, self.version)
self.fileobj.write("\n") # Record separator
def _read_file_header(self):
"""Reads out the file header for the arc file. If version was
not provided, this will autopopulate it."""
header = self.fileobj.readline()
payload1 = self.fileobj.readline()
payload2 = self.fileobj.readline()
version, reserved, organisation = payload1.split(None, 2)
self.fileobj.readline() # Lose the separator newline
self.header_read = True
# print "--------------------------------------------------"
# print header,"\n", payload1, "\n", payload2,"\n"
# print "--------------------------------------------------"
if self.version and int(self.version) != version:
raise IOError("Version mismatch. Requested version was '%s' but version in file was '%s'"%(self.version, version))
if version == '1':
url, ip_address, date, content_type, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 1
elif version == '2':
url, ip_address, date, content_type, result_code, checksum, location, offset, filename, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 2
else:
raise IOError("Unknown ARC version '%s'"%version)
def _read_arc_record(self):
"Reads out an arc record, formats it and returns it"
#XXX:Noufal Stream payload here rather than just read it
# r = self.fileobj.readline() # Drop the initial newline
# if r == "":
# return None
# header = self.fileobj.readline()
# Strip the initial new lines and read first line
header = self.fileobj.readline()
while header and header.strip() == "":
header = self.fileobj.readline()
if header == "":
return None
if int(self.version) == 1:
arc_header_re = ARC1_HEADER_RE
elif int(self.version) == 2:
arc_header_re = ARC2_HEADER_RE
matches = arc_header_re.search(header)
headers = matches.groupdict()
arc_header = ARCHeader(**headers)
payload = self.fileobj.read(int(headers['length']))
self.fileobj.readline() # Munge the separator newline.
return ARCRecord(header = arc_header, payload = payload)
def read(self):
"Reads out an arc record from the file"
if not self.header_read:
self._read_file_header()
return self._read_arc_record()
# For compatability with WARCFile
read_record = read
write_record = write
def __iter__(self):
record = self.read()
while record:
yield record
record = self.read()
def close(self):
self.fileobj.close()
|
internetarchive/warc
|
warc/arc.py
|
ARCFile.write
|
python
|
def write(self, arc_record):
    """Appends arc_record to the file, emitting the file header first if needed."""
    self.version = self.version or 2  # default to v2 output
    if not self.header_written:
        # Flag first so the write() issued by _write_header doesn't recurse.
        self.header_written = True
        self._write_header()
    arc_record.write_to(self.fileobj, self.version)
    self.fileobj.write("\n")  # record separator
|
Writes out the given arc record to the file
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L297-L305
|
[
"def write_to(self, f, version = None):\n version = version or self.version or 2\n self.header.write_to(f, version)\n f.write(\"\\n\") # This separates the header and the body\n if isinstance(self.payload, str): #Usually used for small payloads\n f.write(self.payload)\n elif hasattr(self.payload, \"read\"): #Used for large payloads where we give a file like object\n chunk_size = 10 * 1024 * 1024 # Read 10MB by 10MB\n d = self.payload.read(chunk_size)\n while d:\n f.write(d)\n d = self.payload.read(chunk_size)\n f.write(\"\\n\")\n",
"def _write_header(self):\n \"Writes out an ARC header\"\n if \"org\" not in self.file_headers:\n warnings.warn(\"Using 'unknown' for Archiving organisation name\")\n self.file_headers['org'] = \"Unknown\"\n if \"date\" not in self.file_headers:\n now = datetime.datetime.utcnow()\n warnings.warn(\"Using '%s' for Archiving time\"%now)\n self.file_headers['date'] = now\n if \"ip_address\" not in self.file_headers:\n warnings.warn(\"Using '127.0.0.1' as IP address of machine that's archiving\")\n self.file_headers['ip_address'] = \"127.0.0.1\"\n if self.version == 1:\n payload = \"1 0 %(org)s\\nURL IP-address Archive-date Content-type Archive-length\"%dict(org = self.file_headers['org'])\n elif self.version == 2:\n payload = \"2 0 %(org)s\\nURL IP-address Archive-date Content-type Result-code Checksum Location Offset Filename Archive-length\"\n else:\n raise IOError(\"Can't write an ARC file with version '\\\"%s\\\"'\"%self.version)\n\n fname = os.path.basename(self.fileobj.name)\n header = ARCHeader(url = \"filedesc://%s\"%fname,\n ip_address = self.file_headers['ip_address'], \n date = self.file_headers['date'],\n content_type = \"text/plain\", \n length = len(payload),\n result_code = \"200\",\n checksum = \"-\", \n location = \"-\",\n offset = str(self.fileobj.tell()),\n filename = fname)\n arc_file_header_record = ARCRecord(header, payload%self.file_headers)\n self.write(arc_file_header_record)\n"
] |
class ARCFile(object):
def __init__(self, filename=None, mode=None, fileobj=None, version = None, file_headers = {}):
"""
Initialises a file like object that can be used to read or
write Arc files. Works for both version 1 or version 2.
This can be called similar to the builtin `file` constructor.
It can also just be given a fileobj which is a file like
object that it will use directly for its work.
The file_headers should contain the following fields used to
create the header for the file. The exact fields used depends
on whether v1 or v2 files are being created. If a read is
done, the headers will be autopopulated from the first record.
* ip_address - IP address of the machine doing the Archiving
* date - Date of archival
* org - Organisation that's doing the Archiving.
The version parameter tries to work intuitively as follows
* If version is set to 'n' (where n is 1 or 2), the
library configures itself to read and write version n
ARC files.
* When we try to write a record, it will generate
and write a version n record.
* When we try to read a record, it will attempt to
parse it as a version n record and will error out
if the format is different.
* If the version is unspecified, the library will
configures itself as follows
* When we try to write a record, it will generate
and write a version 2 record.
* When we try to read a record, it will read out one
record and try to guess the version from it (for
the first read).
"""
if fileobj is None:
fileobj = __builtin__.open(filename, mode or "rb")
self.fileobj = fileobj
if version != None and int(version) not in (1, 2):
raise TypeError("ARC version has to be 1 or 2")
self.version = version
self.file_headers = file_headers
self.header_written = False
self.header_read = False
def _write_header(self):
"Writes out an ARC header"
if "org" not in self.file_headers:
warnings.warn("Using 'unknown' for Archiving organisation name")
self.file_headers['org'] = "Unknown"
if "date" not in self.file_headers:
now = datetime.datetime.utcnow()
warnings.warn("Using '%s' for Archiving time"%now)
self.file_headers['date'] = now
if "ip_address" not in self.file_headers:
warnings.warn("Using '127.0.0.1' as IP address of machine that's archiving")
self.file_headers['ip_address'] = "127.0.0.1"
if self.version == 1:
payload = "1 0 %(org)s\nURL IP-address Archive-date Content-type Archive-length"%dict(org = self.file_headers['org'])
elif self.version == 2:
payload = "2 0 %(org)s\nURL IP-address Archive-date Content-type Result-code Checksum Location Offset Filename Archive-length"
else:
raise IOError("Can't write an ARC file with version '\"%s\"'"%self.version)
fname = os.path.basename(self.fileobj.name)
header = ARCHeader(url = "filedesc://%s"%fname,
ip_address = self.file_headers['ip_address'],
date = self.file_headers['date'],
content_type = "text/plain",
length = len(payload),
result_code = "200",
checksum = "-",
location = "-",
offset = str(self.fileobj.tell()),
filename = fname)
arc_file_header_record = ARCRecord(header, payload%self.file_headers)
self.write(arc_file_header_record)
# Record separator
def _read_file_header(self):
"""Reads out the file header for the arc file. If version was
not provided, this will autopopulate it."""
header = self.fileobj.readline()
payload1 = self.fileobj.readline()
payload2 = self.fileobj.readline()
version, reserved, organisation = payload1.split(None, 2)
self.fileobj.readline() # Lose the separator newline
self.header_read = True
# print "--------------------------------------------------"
# print header,"\n", payload1, "\n", payload2,"\n"
# print "--------------------------------------------------"
if self.version and int(self.version) != version:
raise IOError("Version mismatch. Requested version was '%s' but version in file was '%s'"%(self.version, version))
if version == '1':
url, ip_address, date, content_type, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 1
elif version == '2':
url, ip_address, date, content_type, result_code, checksum, location, offset, filename, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 2
else:
raise IOError("Unknown ARC version '%s'"%version)
def _read_arc_record(self):
"Reads out an arc record, formats it and returns it"
#XXX:Noufal Stream payload here rather than just read it
# r = self.fileobj.readline() # Drop the initial newline
# if r == "":
# return None
# header = self.fileobj.readline()
# Strip the initial new lines and read first line
header = self.fileobj.readline()
while header and header.strip() == "":
header = self.fileobj.readline()
if header == "":
return None
if int(self.version) == 1:
arc_header_re = ARC1_HEADER_RE
elif int(self.version) == 2:
arc_header_re = ARC2_HEADER_RE
matches = arc_header_re.search(header)
headers = matches.groupdict()
arc_header = ARCHeader(**headers)
payload = self.fileobj.read(int(headers['length']))
self.fileobj.readline() # Munge the separator newline.
return ARCRecord(header = arc_header, payload = payload)
def read(self):
"Reads out an arc record from the file"
if not self.header_read:
self._read_file_header()
return self._read_arc_record()
# For compatability with WARCFile
read_record = read
write_record = write
def __iter__(self):
record = self.read()
while record:
yield record
record = self.read()
def close(self):
self.fileobj.close()
|
internetarchive/warc
|
warc/arc.py
|
ARCFile._read_file_header
|
python
|
def _read_file_header(self):
header = self.fileobj.readline()
payload1 = self.fileobj.readline()
payload2 = self.fileobj.readline()
version, reserved, organisation = payload1.split(None, 2)
self.fileobj.readline() # Lose the separator newline
self.header_read = True
# print "--------------------------------------------------"
# print header,"\n", payload1, "\n", payload2,"\n"
# print "--------------------------------------------------"
if self.version and int(self.version) != version:
raise IOError("Version mismatch. Requested version was '%s' but version in file was '%s'"%(self.version, version))
if version == '1':
url, ip_address, date, content_type, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 1
elif version == '2':
url, ip_address, date, content_type, result_code, checksum, location, offset, filename, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 2
else:
raise IOError("Unknown ARC version '%s'"%version)
|
Reads out the file header for the arc file. If version was
not provided, this will autopopulate it.
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L307-L335
| null |
class ARCFile(object):
def __init__(self, filename=None, mode=None, fileobj=None, version = None, file_headers = {}):
"""
Initialises a file like object that can be used to read or
write Arc files. Works for both version 1 or version 2.
This can be called similar to the builtin `file` constructor.
It can also just be given a fileobj which is a file like
object that it will use directly for its work.
The file_headers should contain the following fields used to
create the header for the file. The exact fields used depends
on whether v1 or v2 files are being created. If a read is
done, the headers will be autopopulated from the first record.
* ip_address - IP address of the machine doing the Archiving
* date - Date of archival
* org - Organisation that's doing the Archiving.
The version parameter tries to work intuitively as follows
* If version is set to 'n' (where n is 1 or 2), the
library configures itself to read and write version n
ARC files.
* When we try to write a record, it will generate
and write a version n record.
* When we try to read a record, it will attempt to
parse it as a version n record and will error out
if the format is different.
* If the version is unspecified, the library will
configures itself as follows
* When we try to write a record, it will generate
and write a version 2 record.
* When we try to read a record, it will read out one
record and try to guess the version from it (for
the first read).
"""
if fileobj is None:
fileobj = __builtin__.open(filename, mode or "rb")
self.fileobj = fileobj
if version != None and int(version) not in (1, 2):
raise TypeError("ARC version has to be 1 or 2")
self.version = version
self.file_headers = file_headers
self.header_written = False
self.header_read = False
def _write_header(self):
"Writes out an ARC header"
if "org" not in self.file_headers:
warnings.warn("Using 'unknown' for Archiving organisation name")
self.file_headers['org'] = "Unknown"
if "date" not in self.file_headers:
now = datetime.datetime.utcnow()
warnings.warn("Using '%s' for Archiving time"%now)
self.file_headers['date'] = now
if "ip_address" not in self.file_headers:
warnings.warn("Using '127.0.0.1' as IP address of machine that's archiving")
self.file_headers['ip_address'] = "127.0.0.1"
if self.version == 1:
payload = "1 0 %(org)s\nURL IP-address Archive-date Content-type Archive-length"%dict(org = self.file_headers['org'])
elif self.version == 2:
payload = "2 0 %(org)s\nURL IP-address Archive-date Content-type Result-code Checksum Location Offset Filename Archive-length"
else:
raise IOError("Can't write an ARC file with version '\"%s\"'"%self.version)
fname = os.path.basename(self.fileobj.name)
header = ARCHeader(url = "filedesc://%s"%fname,
ip_address = self.file_headers['ip_address'],
date = self.file_headers['date'],
content_type = "text/plain",
length = len(payload),
result_code = "200",
checksum = "-",
location = "-",
offset = str(self.fileobj.tell()),
filename = fname)
arc_file_header_record = ARCRecord(header, payload%self.file_headers)
self.write(arc_file_header_record)
def write(self, arc_record):
"Writes out the given arc record to the file"
if not self.version:
self.version = 2
if not self.header_written:
self.header_written = True
self._write_header()
arc_record.write_to(self.fileobj, self.version)
self.fileobj.write("\n") # Record separator
def _read_file_header(self):
"""Reads out the file header for the arc file. If version was
not provided, this will autopopulate it."""
header = self.fileobj.readline()
payload1 = self.fileobj.readline()
payload2 = self.fileobj.readline()
version, reserved, organisation = payload1.split(None, 2)
self.fileobj.readline() # Lose the separator newline
self.header_read = True
# print "--------------------------------------------------"
# print header,"\n", payload1, "\n", payload2,"\n"
# print "--------------------------------------------------"
if self.version and int(self.version) != version:
raise IOError("Version mismatch. Requested version was '%s' but version in file was '%s'"%(self.version, version))
if version == '1':
url, ip_address, date, content_type, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 1
elif version == '2':
url, ip_address, date, content_type, result_code, checksum, location, offset, filename, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 2
else:
raise IOError("Unknown ARC version '%s'"%version)
def _read_arc_record(self):
"Reads out an arc record, formats it and returns it"
#XXX:Noufal Stream payload here rather than just read it
# r = self.fileobj.readline() # Drop the initial newline
# if r == "":
# return None
# header = self.fileobj.readline()
# Strip the initial new lines and read first line
header = self.fileobj.readline()
while header and header.strip() == "":
header = self.fileobj.readline()
if header == "":
return None
if int(self.version) == 1:
arc_header_re = ARC1_HEADER_RE
elif int(self.version) == 2:
arc_header_re = ARC2_HEADER_RE
matches = arc_header_re.search(header)
headers = matches.groupdict()
arc_header = ARCHeader(**headers)
payload = self.fileobj.read(int(headers['length']))
self.fileobj.readline() # Munge the separator newline.
return ARCRecord(header = arc_header, payload = payload)
def read(self):
"Reads out an arc record from the file"
if not self.header_read:
self._read_file_header()
return self._read_arc_record()
# For compatability with WARCFile
read_record = read
write_record = write
def __iter__(self):
record = self.read()
while record:
yield record
record = self.read()
def close(self):
self.fileobj.close()
|
internetarchive/warc
|
warc/arc.py
|
ARCFile._read_arc_record
|
python
|
def _read_arc_record(self):
    """Reads one ARC record from the file; returns None at end of file."""
    # XXX: Noufal -- stream the payload here rather than reading it whole.
    # Skip any blank separator lines that precede the record header.
    line = self.fileobj.readline()
    while line and not line.strip():
        line = self.fileobj.readline()
    if line == "":
        return None  # end of file

    # Pick the header pattern matching this file's ARC version.
    if int(self.version) == 1:
        pattern = ARC1_HEADER_RE
    elif int(self.version) == 2:
        pattern = ARC2_HEADER_RE
    fields = pattern.search(line).groupdict()
    record_header = ARCHeader(**fields)

    # The header's length field gives the exact payload size in bytes.
    body = self.fileobj.read(int(fields['length']))
    self.fileobj.readline()  # consume the trailing separator newline
    return ARCRecord(header = record_header, payload = body)
|
Reads out an arc record, formats it and returns it
|
train
|
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L337-L366
| null |
class ARCFile(object):
def __init__(self, filename=None, mode=None, fileobj=None, version = None, file_headers = {}):
"""
Initialises a file like object that can be used to read or
write Arc files. Works for both version 1 or version 2.
This can be called similar to the builtin `file` constructor.
It can also just be given a fileobj which is a file like
object that it will use directly for its work.
The file_headers should contain the following fields used to
create the header for the file. The exact fields used depends
on whether v1 or v2 files are being created. If a read is
done, the headers will be autopopulated from the first record.
* ip_address - IP address of the machine doing the Archiving
* date - Date of archival
* org - Organisation that's doing the Archiving.
The version parameter tries to work intuitively as follows
* If version is set to 'n' (where n is 1 or 2), the
library configures itself to read and write version n
ARC files.
* When we try to write a record, it will generate
and write a version n record.
* When we try to read a record, it will attempt to
parse it as a version n record and will error out
if the format is different.
* If the version is unspecified, the library will
configures itself as follows
* When we try to write a record, it will generate
and write a version 2 record.
* When we try to read a record, it will read out one
record and try to guess the version from it (for
the first read).
"""
if fileobj is None:
fileobj = __builtin__.open(filename, mode or "rb")
self.fileobj = fileobj
if version != None and int(version) not in (1, 2):
raise TypeError("ARC version has to be 1 or 2")
self.version = version
self.file_headers = file_headers
self.header_written = False
self.header_read = False
def _write_header(self):
"Writes out an ARC header"
if "org" not in self.file_headers:
warnings.warn("Using 'unknown' for Archiving organisation name")
self.file_headers['org'] = "Unknown"
if "date" not in self.file_headers:
now = datetime.datetime.utcnow()
warnings.warn("Using '%s' for Archiving time"%now)
self.file_headers['date'] = now
if "ip_address" not in self.file_headers:
warnings.warn("Using '127.0.0.1' as IP address of machine that's archiving")
self.file_headers['ip_address'] = "127.0.0.1"
if self.version == 1:
payload = "1 0 %(org)s\nURL IP-address Archive-date Content-type Archive-length"%dict(org = self.file_headers['org'])
elif self.version == 2:
payload = "2 0 %(org)s\nURL IP-address Archive-date Content-type Result-code Checksum Location Offset Filename Archive-length"
else:
raise IOError("Can't write an ARC file with version '\"%s\"'"%self.version)
fname = os.path.basename(self.fileobj.name)
header = ARCHeader(url = "filedesc://%s"%fname,
ip_address = self.file_headers['ip_address'],
date = self.file_headers['date'],
content_type = "text/plain",
length = len(payload),
result_code = "200",
checksum = "-",
location = "-",
offset = str(self.fileobj.tell()),
filename = fname)
arc_file_header_record = ARCRecord(header, payload%self.file_headers)
self.write(arc_file_header_record)
def write(self, arc_record):
"Writes out the given arc record to the file"
if not self.version:
self.version = 2
if not self.header_written:
self.header_written = True
self._write_header()
arc_record.write_to(self.fileobj, self.version)
self.fileobj.write("\n") # Record separator
def _read_file_header(self):
"""Reads out the file header for the arc file. If version was
not provided, this will autopopulate it."""
header = self.fileobj.readline()
payload1 = self.fileobj.readline()
payload2 = self.fileobj.readline()
version, reserved, organisation = payload1.split(None, 2)
self.fileobj.readline() # Lose the separator newline
self.header_read = True
# print "--------------------------------------------------"
# print header,"\n", payload1, "\n", payload2,"\n"
# print "--------------------------------------------------"
if self.version and int(self.version) != version:
raise IOError("Version mismatch. Requested version was '%s' but version in file was '%s'"%(self.version, version))
if version == '1':
url, ip_address, date, content_type, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 1
elif version == '2':
url, ip_address, date, content_type, result_code, checksum, location, offset, filename, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 2
else:
raise IOError("Unknown ARC version '%s'"%version)
def read(self):
"Reads out an arc record from the file"
if not self.header_read:
self._read_file_header()
return self._read_arc_record()
# For compatability with WARCFile
read_record = read
write_record = write
def __iter__(self):
record = self.read()
while record:
yield record
record = self.read()
def close(self):
self.fileobj.close()
|
peterldowns/lggr
|
lggr/__init__.py
|
Printer
|
python
|
def Printer(open_file=sys.stdout, closing=False):
    """Coroutine that writes each received string to open_file, one per line.

    If closing is true, open_file is closed (best-effort) when the
    coroutine is shut down.
    """
    try:
        while True:
            message = (yield)
            open_file.write(message)
            open_file.write('\n')  # terminate the line
    except GeneratorExit:
        if closing:
            try:
                open_file.close()
            except:
                pass
|
Writes each received item to the given file-like object, one per line.
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L322-L332
| null |
# coding: utf-8
"""
Simplified logging: dispatches log messages to any set of user-supplied logger coroutines.
"""
import os
import sys
import time
import inspect
import traceback
from lggr.coroutine import coroutine, coroutine_process, coroutine_thread
__version__ = '0.2.2'
DEBUG = 'DEBUG'
INFO = 'INFO'
WARNING = 'WARNING'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
ALL = (DEBUG, INFO, WARNING, ERROR, CRITICAL) # shortcut
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = 'lggr%s__init%s' % (os.sep, __file[-4:])
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
#
try:
import threading
except:
threading = None
try:
import multiprocessing as mp
except:
mp = None
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def add(self, levels, logger):
""" Given a list or tuple of logging levels,
add a logger instance to each. """
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
def remove(self, level, logger):
""" Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way."""
self.config[level].discard(logger)
logger.close()
def clear(self, level):
""" Remove all logger functions from a given level. """
for item in self.config[level]:
item.close()
self.config[level].clear()
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
""" Create a 'record' (a dictionary) with information to be logged. """
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
def log(self, *args, **kwargs):
""" Do logging, but handle error suppression. """
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
def multi(self, lvl_list, msg, *args, **kwargs):
""" Log a message at multiple levels"""
for level in lvl_list:
self.log(level, msg, args, **kwargs)
def all(self, msg, *args, **kwargs):
""" Log a message at every known log level """
self.multi(ALL, msg, args, **kwargs)
def _find_caller(self):
"""
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
"""
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
@coroutine_process
def StderrPrinter():
""" Prints items to stderr. """
return Printer(open_file=sys.stderr, closing=False)
def FilePrinter(filename, mode='a', closing=True):
    """ Opens the given file (expanding '~' and relative paths) and
    returns a Printer coroutine that writes to it. """
    # Bug fix: the original placed this docstring *after* the first
    # statement, where it was a dead string expression, not a docstring.
    path = os.path.abspath(os.path.expanduser(filename))
    f = open(path, mode)
    return Printer(f, closing)
@coroutine_process
def SocketWriter(host, port, af=None, st=None):
""" Writes messages to a socket/host. """
import socket
if af is None:
af = socket.AF_INET
if st is None:
st = socket.SOCK_STREAM
message = '({0}): {1}'
s = socket.socket(af, st)
s.connect(host, port)
try:
while True:
logstr = (yield)
s.send(logstr)
except GeneratorExit:
s.close()
@coroutine_process
def Emailer(recipients, sender=None):
""" Sends messages as emails to the given list
of recipients. """
import smtplib
hostname = socket.gethostname()
if not sender:
sender = 'lggr@{0}'.format(hostname)
smtp = smtplib.SMTP('localhost')
try:
while True:
logstr = (yield)
try:
smtp.sendmail(sender, recipients, logstr)
except smtplib.SMTPException:
pass
except GeneratorExit:
smtp.quit()
@coroutine_process
def GMailer(recipients, username, password, subject='Log message from lggr.py'):
""" Sends messages as emails to the given list
of recipients, from a GMail account. """
import smtplib
srvr = smtplib.SMTP('smtp.gmail.com', 587)
srvr.ehlo()
srvr.starttls()
srvr.ehlo()
srvr.login(username, password)
if not (isinstance(recipients, list) or isinstance(recipients, tuple)):
recipients = [recipients]
gmail_sender = '{0}@gmail.com'.format(username)
msg = 'To: {0}\nFrom: '+gmail_sender+'\nSubject: '+subject+'\n'
msg = msg + '\n{1}\n\n'
try:
while True:
logstr = (yield)
for rcp in recipients:
message = msg.format(rcp, logstr)
srvr.sendmail(gmail_sender, rcp, message)
except GeneratorExit:
srvr.quit()
|
peterldowns/lggr
|
lggr/__init__.py
|
FilePrinter
|
python
|
def FilePrinter(filename, mode='a', closing=True):
path = os.path.abspath(os.path.expanduser(filename))
f = open(path, mode)
return Printer(f, closing)
|
Opens the given file and returns a printer to it.
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L338-L342
|
[
"def wrapper(*args, **kwargs):\n cp = CoroutineProcess(func)\n cp = cp(*args, **kwargs)\n # XXX(todo): use @CoroutineProcess on an individual function, then wrap\n # with @coroutine, too. Don't start until .next().\n return cp\n"
] |
# coding: utf-8
"""
TODO: add a docstring.
"""
import os
import sys
import time
import inspect
import traceback
from lggr.coroutine import coroutine, coroutine_process, coroutine_thread
__version__ = '0.2.2'
DEBUG = 'DEBUG'
INFO = 'INFO'
WARNING = 'WARNING'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
ALL = (DEBUG, INFO, WARNING, ERROR, CRITICAL) # shortcut
# Locate this module's own source file so that _find_caller() can skip any
# stack frames originating inside lggr itself.
if hasattr(sys, 'frozen'):  # support for py2exe, where __file__ is unreliable
    # Reconstruct the module path, keeping the same extension (".py"/".pyc")
    # as the running module.  Bug fix: the original read the undefined name
    # `__file` (NameError) and spelled the filename '__init' without the
    # trailing underscores, so frozen builds could never match their own
    # frames in _find_caller().
    _srcfile = 'lggr%s__init__%s' % (os.sep, __file__[-4:])
else:
    _srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
#
try:
import threading
except:
threading = None
try:
import multiprocessing as mp
except:
mp = None
class Lggr():
    """ Simplified logging. Dispatches messages to any type of logging function
    you want to write, all it has to support is send() and close(). """
    def __init__(self,
            defaultfmt=None,
            keep_history=False,
            suppress_errors=True):
        # Default layout used when a record does not override 'defaultfmt'.
        self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
        self.config = {
            # One set of logger coroutines per level.
            CRITICAL: set(),
            ERROR: set(),
            DEBUG: set(),
            WARNING: set(),
            INFO: set(),
            # Allow lggrname.defaultfmt act as a shortcut.
            'defaultfmt': self.defaultfmt
        }
        self.history = []
        self.enabled = True
        self.keep_history = keep_history
        self.suppress_errors = suppress_errors
        # allow instance.LEVEL instead of lggr.LEVEL
        self.ALL = ALL
        self.DEBUG = DEBUG
        self.INFO = INFO
        self.WARNING = WARNING
        self.ERROR = ERROR
        self.CRITICAL = CRITICAL
    def disable(self):
        """ Turn off logging. """
        self.enabled = False
    def enable(self):
        """ Turn on logging. Enabled by default. """
        self.enabled = True
    def close(self):
        """ Stop and remove all logging functions
        and disable this logger. """
        for level in ALL:
            self.clear(level)
        self.disable()
    def add(self, levels, logger):
        """ Given a list or tuple of logging levels,
        add a logger instance to each. """
        if isinstance(levels, (list, tuple)):
            for lvl in levels:
                self.config[lvl].add(logger)
        else:
            self.config[levels].add(logger)
    def remove(self, level, logger):
        """ Given a level, remove a given logger function
        if it is a member of that level, closing the logger
        function either way."""
        self.config[level].discard(logger)
        logger.close()
    def clear(self, level):
        """ Remove all logger functions from a given level. """
        for item in self.config[level]:
            item.close()
        self.config[level].clear()
    def _make_record(self,
            level,
            fmt,
            args,
            extra,
            exc_info,
            inc_stackinfo,
            inc_multiproc):
        """ Create a 'record' (a dictionary) with information to be logged. """
        fn = fname = '(unknown file)'
        lno = 0
        func = '(unknown function)'
        code = '(code not available)'
        cc = []
        sinfo = None
        module = '(unknown module)'
        if _srcfile and inc_stackinfo:
            #IronPython doesn't track Python frames, so _find_caller throws an
            #exception on some versions of IronPython. We trap it here so that
            #IronPython can use logging.
            try:
                fn, lno, func, code, cc, sinfo = self._find_caller()
                fname = os.path.basename(fn)
                module = os.path.splitext(fname)[0]
            except ValueError:
                pass
        if not exc_info or not isinstance(exc_info, tuple):
            # Allow passed in exc_info, but supply it if it isn't
            exc_info = sys.exc_info()
        log_record = { # This is available information for logging functions.
            #TODO: proc_name, thread_name
            # see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
            'asctime': time.asctime(), # TODO: actual specifier for format
            'code': code,
            'codecontext': ''.join(cc),
            'excinfo' : exc_info,
            'filename' : fname,
            'funcname' : func,
            'levelname' : level,
            'levelno' : ALL.index(level),
            'lineno' : lno,
            'logmessage' : None,
            'messagefmt' : fmt,
            'module' : module,
            'pathname' : fn,
            'process' : os.getpid(),
            'processname' : None,
            'stackinfo' : sinfo,
            'threadid' : None,
            'threadname' : None,
            'time' : time.time(),
            # The custom `extra` information can only be used to format the
            # default format. The `logmessage` can only be passed a dictionary
            # or a list (as `args`).
            'defaultfmt' : self.config['defaultfmt']
        }
        # If the user passed a single dict, use that with format. If we're
        # passed a tuple or list, dereference its contents as args to format,
        # too. Otherwise, leave the log message as None.
        if args:
            if (isinstance(args, (tuple, list)) and
                len(args) == 1 and
                isinstance(args[0], dict)):
                log_record['logmessage'] = fmt.format(**args[0])
            else:
                log_record['logmessage'] = fmt.format(*args)
        else:
            log_record['logmessage'] = fmt
        if extra:
            log_record.update(extra) # add custom variables to record
        if threading: # check to use threading
            curthread = threading.current_thread()
            log_record.update({
                'threadid' : curthread.ident,
                'threadname' : curthread.name
            })
        if not inc_multiproc: # check to use multiprocessing
            procname = None
        else:
            procname = 'MainProcess'
            if mp:
                try:
                    # Bug fix: was mp.curent_process() (typo).  The resulting
                    # AttributeError was silently swallowed below, so the
                    # record always reported 'MainProcess'.
                    procname = mp.current_process().name
                except StandardError:
                    pass
        log_record['processname'] = procname
        return log_record
    def _log(self,
            level,
            fmt,
            args=None,
            extra=None,
            exc_info=None,
            inc_stackinfo=False,
            inc_multiproc=False):
        """ Send a log message to all of the logging functions
        for a given level as well as adding the
        message to this logger instance's history. """
        if not self.enabled:
            return # Fail silently so that logging can easily be removed
        log_record = self._make_record(
            level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
        logstr = log_record['defaultfmt'].format(**log_record) #whoah.
        if self.keep_history:
            self.history.append(logstr)
        log_funcs = self.config[level]
        to_remove = []
        for lf in log_funcs:
            try:
                lf.send(logstr)
            except StopIteration:
                # in the case that the log function is already closed, add it
                # to the list of functions to be deleted.
                to_remove.append(lf)
        for lf in to_remove:
            self.remove(level, lf)
            self.info('Logging function {} removed from level {}', lf, level)
    def log(self, *args, **kwargs):
        """ Do logging, but handle error suppression. """
        if self.suppress_errors:
            try:
                self._log(*args, **kwargs)
                return True
            except:
                return False
        else:
            self._log(*args, **kwargs)
            return True
    #debug, info, warning, error, critical
    def info(self, msg, *args, **kwargs):
        """ Log a message with INFO level """
        self.log(INFO, msg, args, **kwargs)
    def warning(self, msg, *args, **kwargs):
        """ Log a message with WARNING level """
        self.log(WARNING, msg, args, **kwargs)
    def debug(self, msg, *args, **kwargs):
        """ Log a message with DEBUG level. Automatically includes stack info
        unless it is specifically not included. """
        kwargs.setdefault('inc_stackinfo', True)
        self.log(DEBUG, msg, args, **kwargs)
    def error(self, msg, *args, **kwargs):
        """ Log a message with ERROR level. Automatically includes stack and
        process info unless they are specifically not included. """
        kwargs.setdefault('inc_stackinfo', True)
        kwargs.setdefault('inc_multiproc', True)
        self.log(ERROR, msg, args, **kwargs)
    def critical(self, msg, *args, **kwargs):
        """ Log a message with CRITICAL level. Automatically includes stack and
        process info unless they are specifically not included. """
        kwargs.setdefault('inc_stackinfo', True)
        kwargs.setdefault('inc_multiproc', True)
        self.log(CRITICAL, msg, args, **kwargs)
    def multi(self, lvl_list, msg, *args, **kwargs):
        """ Log a message at multiple levels"""
        for level in lvl_list:
            self.log(level, msg, args, **kwargs)
    def all(self, msg, *args, **kwargs):
        """ Log a message at every known log level """
        self.multi(ALL, msg, args, **kwargs)
    def _find_caller(self):
        """
        Find the stack frame of the caller so that we can note the source file
        name, line number, and function name.
        """
        rv = ('(unknown file)',
              0,
              '(unknown function)',
              '(code not available)',
              [],
              None)
        f = inspect.currentframe()
        while hasattr(f, 'f_code'):
            co = f.f_code
            filename = os.path.normcase(co.co_filename)
            # When lggr is imported as a module, the `_src_file` filename ends
            # in '.pyc', while the filename grabbed from inspect will end in
            # '.py'. We use splitext here to compare absolute paths without the
            # extension, which restores the intended behavior of dropping down
            # the callstack until we reach the first file not part of this
            # library.
            if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
                f = f.f_back # get out of this logging file
                continue
            sinfo = traceback.extract_stack(f)
            fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
            # Mark the calling line with a >
            # NOTE: map() returns a list on Python 2 (this file targets
            # Python 2 — see StandardError above); under Python 3 the cc[i]
            # subscript below would need list(map(...)).
            cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
                     enumerate(cc))
            code = '>' + cc[i]
            rv = (fname, lno, fnc, code, cc, sinfo)
            break
        return rv
@coroutine_process
def Printer(open_file=sys.stdout, closing=False):
    """ Consumes log strings and writes each one, newline-terminated,
    to *open_file* (stdout by default).  If *closing* is true, the
    stream is closed when the coroutine is shut down. """
    try:
        while True:
            message = (yield)
            open_file.write(message)
            open_file.write('\n')
    except GeneratorExit:
        # Shutdown path: close the stream only when asked to, and
        # ignore any error raised by closing it.
        if not closing:
            return
        try:
            open_file.close()
        except:
            pass
def StderrPrinter():
    """ Returns a Printer coroutine that writes to stderr and never
    closes it. """
    return Printer(sys.stderr, closing=False)
@coroutine_process
def SocketWriter(host, port, af=None, st=None):
    """ Writes each consumed log string to a socket connected to
    (host, port).  *af*/*st* default to AF_INET / SOCK_STREAM. """
    import socket
    if af is None:
        af = socket.AF_INET
    if st is None:
        st = socket.SOCK_STREAM
    s = socket.socket(af, st)
    # Bug fix: socket.connect() takes a single (host, port) address tuple;
    # the original passed two positional arguments, which raises TypeError.
    # (Also dropped the unused `message` template local.)
    s.connect((host, port))
    try:
        while True:
            logstr = (yield)
            s.send(logstr)
    except GeneratorExit:
        s.close()
@coroutine_process
def Emailer(recipients, sender=None):
    """ Sends each consumed log message as an email to the given list
    of recipients, via a local SMTP server.  If *sender* is not given,
    'lggr@<hostname>' is used. """
    import smtplib
    # Bug fix: `socket` was never imported, so socket.gethostname()
    # raised NameError as soon as the coroutine started.
    import socket
    hostname = socket.gethostname()
    if not sender:
        sender = 'lggr@{0}'.format(hostname)
    smtp = smtplib.SMTP('localhost')
    try:
        while True:
            logstr = (yield)
            try:
                smtp.sendmail(sender, recipients, logstr)
            except smtplib.SMTPException:
                # Best-effort delivery: a failed send must not kill the
                # logging coroutine.
                pass
    except GeneratorExit:
        smtp.quit()
@coroutine_process
def GMailer(recipients, username, password, subject='Log message from lggr.py'):
    """ Emails every consumed log message to *recipients* from the GMail
    account *username*, authenticated with *password*. """
    import smtplib
    # Open an authenticated TLS session with GMail's submission port.
    session = smtplib.SMTP('smtp.gmail.com', 587)
    session.ehlo()
    session.starttls()
    session.ehlo()  # re-identify after the TLS upgrade
    session.login(username, password)
    if not isinstance(recipients, (list, tuple)):
        recipients = [recipients]
    gmail_sender = '{0}@gmail.com'.format(username)
    # Message template: {0} is the recipient address, {1} the log text.
    template = ('To: {0}\nFrom: ' + gmail_sender +
                '\nSubject: ' + subject + '\n' + '\n{1}\n\n')
    try:
        while True:
            logstr = (yield)
            for recipient in recipients:
                session.sendmail(gmail_sender, recipient,
                                 template.format(recipient, logstr))
    except GeneratorExit:
        session.quit()
|
peterldowns/lggr
|
lggr/__init__.py
|
SocketWriter
|
python
|
def SocketWriter(host, port, af=None, st=None):
import socket
if af is None:
af = socket.AF_INET
if st is None:
st = socket.SOCK_STREAM
message = '({0}): {1}'
s = socket.socket(af, st)
s.connect(host, port)
try:
while True:
logstr = (yield)
s.send(logstr)
except GeneratorExit:
s.close()
|
Writes messages to a socket/host.
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L345-L360
| null |
# coding: utf-8
"""
TODO: add a docstring.
"""
import os
import sys
import time
import inspect
import traceback
from lggr.coroutine import coroutine, coroutine_process, coroutine_thread
__version__ = '0.2.2'
DEBUG = 'DEBUG'
INFO = 'INFO'
WARNING = 'WARNING'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
ALL = (DEBUG, INFO, WARNING, ERROR, CRITICAL) # shortcut
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = 'lggr%s__init%s' % (os.sep, __file[-4:])
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
#
try:
import threading
except:
threading = None
try:
import multiprocessing as mp
except:
mp = None
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def add(self, levels, logger):
""" Given a list or tuple of logging levels,
add a logger instance to each. """
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
def remove(self, level, logger):
""" Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way."""
self.config[level].discard(logger)
logger.close()
def clear(self, level):
""" Remove all logger functions from a given level. """
for item in self.config[level]:
item.close()
self.config[level].clear()
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
""" Create a 'record' (a dictionary) with information to be logged. """
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
def log(self, *args, **kwargs):
""" Do logging, but handle error suppression. """
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
def multi(self, lvl_list, msg, *args, **kwargs):
""" Log a message at multiple levels"""
for level in lvl_list:
self.log(level, msg, args, **kwargs)
def all(self, msg, *args, **kwargs):
""" Log a message at every known log level """
self.multi(ALL, msg, args, **kwargs)
def _find_caller(self):
"""
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
"""
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
@coroutine_process
def Printer(open_file=sys.stdout, closing=False):
""" Prints items with a timestamp. """
try:
while True:
logstr = (yield)
open_file.write(logstr)
open_file.write('\n') # new line
except GeneratorExit:
if closing:
try: open_file.close()
except: pass
def StderrPrinter():
""" Prints items to stderr. """
return Printer(open_file=sys.stderr, closing=False)
def FilePrinter(filename, mode='a', closing=True):
path = os.path.abspath(os.path.expanduser(filename))
""" Opens the given file and returns a printer to it. """
f = open(path, mode)
return Printer(f, closing)
@coroutine_process
@coroutine_process
def Emailer(recipients, sender=None):
""" Sends messages as emails to the given list
of recipients. """
import smtplib
hostname = socket.gethostname()
if not sender:
sender = 'lggr@{0}'.format(hostname)
smtp = smtplib.SMTP('localhost')
try:
while True:
logstr = (yield)
try:
smtp.sendmail(sender, recipients, logstr)
except smtplib.SMTPException:
pass
except GeneratorExit:
smtp.quit()
@coroutine_process
def GMailer(recipients, username, password, subject='Log message from lggr.py'):
""" Sends messages as emails to the given list
of recipients, from a GMail account. """
import smtplib
srvr = smtplib.SMTP('smtp.gmail.com', 587)
srvr.ehlo()
srvr.starttls()
srvr.ehlo()
srvr.login(username, password)
if not (isinstance(recipients, list) or isinstance(recipients, tuple)):
recipients = [recipients]
gmail_sender = '{0}@gmail.com'.format(username)
msg = 'To: {0}\nFrom: '+gmail_sender+'\nSubject: '+subject+'\n'
msg = msg + '\n{1}\n\n'
try:
while True:
logstr = (yield)
for rcp in recipients:
message = msg.format(rcp, logstr)
srvr.sendmail(gmail_sender, rcp, message)
except GeneratorExit:
srvr.quit()
|
peterldowns/lggr
|
lggr/__init__.py
|
Emailer
|
python
|
def Emailer(recipients, sender=None):
import smtplib
hostname = socket.gethostname()
if not sender:
sender = 'lggr@{0}'.format(hostname)
smtp = smtplib.SMTP('localhost')
try:
while True:
logstr = (yield)
try:
smtp.sendmail(sender, recipients, logstr)
except smtplib.SMTPException:
pass
except GeneratorExit:
smtp.quit()
|
Sends messages as emails to the given list
of recipients.
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L363-L379
| null |
# coding: utf-8
"""
TODO: add a docstring.
"""
import os
import sys
import time
import inspect
import traceback
from lggr.coroutine import coroutine, coroutine_process, coroutine_thread
__version__ = '0.2.2'
DEBUG = 'DEBUG'
INFO = 'INFO'
WARNING = 'WARNING'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
ALL = (DEBUG, INFO, WARNING, ERROR, CRITICAL) # shortcut
# Locate this module's source file so _find_caller can skip any stack frame
# that originates inside lggr itself (same trick as the stdlib logging module).
if hasattr(sys, 'frozen'):  # support for py2exe
    # BUG FIX: the original read `__file[-4:]` (NameError -- the dunder is
    # `__file__`) and the pattern dropped the trailing underscores of
    # `__init__`.  Mirrors logging's `"logging%s__init__%s"` construction.
    _srcfile = 'lggr%s__init__%s' % (os.sep, __file__[-4:])
else:
    _srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
#
# Optional dependencies: threading and multiprocessing enrich log records
# with thread/process names but are not required.
try:
    import threading
except ImportError:
    threading = None
try:
    import multiprocessing as mp
except ImportError:
    mp = None
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def add(self, levels, logger):
""" Given a list or tuple of logging levels,
add a logger instance to each. """
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
def remove(self, level, logger):
""" Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way."""
self.config[level].discard(logger)
logger.close()
def clear(self, level):
""" Remove all logger functions from a given level. """
for item in self.config[level]:
item.close()
self.config[level].clear()
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
""" Create a 'record' (a dictionary) with information to be logged. """
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
def log(self, *args, **kwargs):
""" Do logging, but handle error suppression. """
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
def multi(self, lvl_list, msg, *args, **kwargs):
""" Log a message at multiple levels"""
for level in lvl_list:
self.log(level, msg, args, **kwargs)
def all(self, msg, *args, **kwargs):
""" Log a message at every known log level """
self.multi(ALL, msg, args, **kwargs)
def _find_caller(self):
"""
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
"""
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
@coroutine_process
def Printer(open_file=sys.stdout, closing=False):
    """Coroutine that writes each received log string to *open_file*,
    one per line.  When *closing* is true, the file is closed on exit."""
    try:
        while True:
            open_file.write((yield))
            open_file.write('\n')  # new line
    except GeneratorExit:
        if closing:
            try:
                open_file.close()
            except:
                pass
def StderrPrinter():
    """Return a Printer coroutine that writes to stderr (never closed)."""
    return Printer(open_file=sys.stderr, closing=False)
def FilePrinter(filename, mode='a', closing=True):
    """Open the given file and return a Printer coroutine writing to it.

    The path is tilde-expanded and made absolute before opening.  By
    default the file is opened for appending and closed when the printer
    coroutine is shut down.
    """
    # BUG FIX: the docstring used to appear *after* the first statement,
    # where it was just a discarded string expression, not documentation.
    path = os.path.abspath(os.path.expanduser(filename))
    f = open(path, mode)
    return Printer(f, closing)
@coroutine_process
def SocketWriter(host, port, af=None, st=None):
    """Coroutine that sends each received log string over a socket.

    host, port: address to connect to.
    af, st: optional address family and socket type; default to
        AF_INET / SOCK_STREAM.
    """
    import socket
    if af is None:
        af = socket.AF_INET
    if st is None:
        st = socket.SOCK_STREAM
    message = '({0}): {1}'
    s = socket.socket(af, st)
    # BUG FIX: socket.connect() takes a single (host, port) address tuple;
    # passing two positional arguments raises TypeError.
    s.connect((host, port))
    try:
        while True:
            logstr = (yield)
            s.send(logstr)
    except GeneratorExit:
        s.close()
@coroutine_process
@coroutine_process
def GMailer(recipients, username, password, subject='Log message from lggr.py'):
""" Sends messages as emails to the given list
of recipients, from a GMail account. """
import smtplib
srvr = smtplib.SMTP('smtp.gmail.com', 587)
srvr.ehlo()
srvr.starttls()
srvr.ehlo()
srvr.login(username, password)
if not (isinstance(recipients, list) or isinstance(recipients, tuple)):
recipients = [recipients]
gmail_sender = '{0}@gmail.com'.format(username)
msg = 'To: {0}\nFrom: '+gmail_sender+'\nSubject: '+subject+'\n'
msg = msg + '\n{1}\n\n'
try:
while True:
logstr = (yield)
for rcp in recipients:
message = msg.format(rcp, logstr)
srvr.sendmail(gmail_sender, rcp, message)
except GeneratorExit:
srvr.quit()
|
peterldowns/lggr
|
lggr/__init__.py
|
GMailer
|
python
|
def GMailer(recipients, username, password, subject='Log message from lggr.py'):
import smtplib
srvr = smtplib.SMTP('smtp.gmail.com', 587)
srvr.ehlo()
srvr.starttls()
srvr.ehlo()
srvr.login(username, password)
if not (isinstance(recipients, list) or isinstance(recipients, tuple)):
recipients = [recipients]
gmail_sender = '{0}@gmail.com'.format(username)
msg = 'To: {0}\nFrom: '+gmail_sender+'\nSubject: '+subject+'\n'
msg = msg + '\n{1}\n\n'
try:
while True:
logstr = (yield)
for rcp in recipients:
message = msg.format(rcp, logstr)
srvr.sendmail(gmail_sender, rcp, message)
except GeneratorExit:
srvr.quit()
|
Sends messages as emails to the given list
of recipients, from a GMail account.
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L382-L407
| null |
# coding: utf-8
"""
TODO: add a docstring.
"""
import os
import sys
import time
import inspect
import traceback
from lggr.coroutine import coroutine, coroutine_process, coroutine_thread
__version__ = '0.2.2'
DEBUG = 'DEBUG'
INFO = 'INFO'
WARNING = 'WARNING'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
ALL = (DEBUG, INFO, WARNING, ERROR, CRITICAL) # shortcut
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = 'lggr%s__init%s' % (os.sep, __file[-4:])
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
#
try:
import threading
except:
threading = None
try:
import multiprocessing as mp
except:
mp = None
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def add(self, levels, logger):
""" Given a list or tuple of logging levels,
add a logger instance to each. """
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
def remove(self, level, logger):
""" Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way."""
self.config[level].discard(logger)
logger.close()
def clear(self, level):
""" Remove all logger functions from a given level. """
for item in self.config[level]:
item.close()
self.config[level].clear()
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
""" Create a 'record' (a dictionary) with information to be logged. """
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
def log(self, *args, **kwargs):
""" Do logging, but handle error suppression. """
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
def multi(self, lvl_list, msg, *args, **kwargs):
""" Log a message at multiple levels"""
for level in lvl_list:
self.log(level, msg, args, **kwargs)
def all(self, msg, *args, **kwargs):
""" Log a message at every known log level """
self.multi(ALL, msg, args, **kwargs)
def _find_caller(self):
"""
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
"""
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
@coroutine_process
def Printer(open_file=sys.stdout, closing=False):
""" Prints items with a timestamp. """
try:
while True:
logstr = (yield)
open_file.write(logstr)
open_file.write('\n') # new line
except GeneratorExit:
if closing:
try: open_file.close()
except: pass
def StderrPrinter():
""" Prints items to stderr. """
return Printer(open_file=sys.stderr, closing=False)
def FilePrinter(filename, mode='a', closing=True):
path = os.path.abspath(os.path.expanduser(filename))
""" Opens the given file and returns a printer to it. """
f = open(path, mode)
return Printer(f, closing)
@coroutine_process
def SocketWriter(host, port, af=None, st=None):
""" Writes messages to a socket/host. """
import socket
if af is None:
af = socket.AF_INET
if st is None:
st = socket.SOCK_STREAM
message = '({0}): {1}'
s = socket.socket(af, st)
s.connect(host, port)
try:
while True:
logstr = (yield)
s.send(logstr)
except GeneratorExit:
s.close()
@coroutine_process
def Emailer(recipients, sender=None):
    """Coroutine that sends each received log string as an e-mail through
    a local SMTP server.

    recipients: address or list of addresses to send to.
    sender: from-address; defaults to ``lggr@<hostname>``.
    """
    import smtplib
    # BUG FIX: the module never imports socket at top level (SocketWriter
    # imports it locally), so socket.gethostname() raised NameError here.
    import socket
    hostname = socket.gethostname()
    if not sender:
        sender = 'lggr@{0}'.format(hostname)
    smtp = smtplib.SMTP('localhost')
    try:
        while True:
            logstr = (yield)
            try:
                smtp.sendmail(sender, recipients, logstr)
            except smtplib.SMTPException:
                # Best effort: drop messages the server refuses.
                pass
    except GeneratorExit:
        smtp.quit()
@coroutine_process
|
peterldowns/lggr
|
lggr/__init__.py
|
Lggr.add
|
python
|
def add(self, levels, logger):
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
|
Given a list or tuple of logging levels,
add a logger instance to each.
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L84-L91
| null |
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def remove(self, level, logger):
""" Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way."""
self.config[level].discard(logger)
logger.close()
def clear(self, level):
""" Remove all logger functions from a given level. """
for item in self.config[level]:
item.close()
self.config[level].clear()
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
""" Create a 'record' (a dictionary) with information to be logged. """
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
def log(self, *args, **kwargs):
""" Do logging, but handle error suppression. """
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
def multi(self, lvl_list, msg, *args, **kwargs):
""" Log a message at multiple levels"""
for level in lvl_list:
self.log(level, msg, args, **kwargs)
def all(self, msg, *args, **kwargs):
""" Log a message at every known log level """
self.multi(ALL, msg, args, **kwargs)
def _find_caller(self):
"""
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
"""
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
|
peterldowns/lggr
|
lggr/__init__.py
|
Lggr.remove
|
python
|
def remove(self, level, logger):
self.config[level].discard(logger)
logger.close()
|
Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way.
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L93-L98
| null |
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def add(self, levels, logger):
""" Given a list or tuple of logging levels,
add a logger instance to each. """
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
def clear(self, level):
""" Remove all logger functions from a given level. """
for item in self.config[level]:
item.close()
self.config[level].clear()
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
""" Create a 'record' (a dictionary) with information to be logged. """
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
def log(self, *args, **kwargs):
""" Do logging, but handle error suppression. """
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
def multi(self, lvl_list, msg, *args, **kwargs):
""" Log a message at multiple levels"""
for level in lvl_list:
self.log(level, msg, args, **kwargs)
def all(self, msg, *args, **kwargs):
""" Log a message at every known log level """
self.multi(ALL, msg, args, **kwargs)
def _find_caller(self):
"""
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
"""
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
|
peterldowns/lggr
|
lggr/__init__.py
|
Lggr.clear
|
python
|
def clear(self, level):
for item in self.config[level]:
item.close()
self.config[level].clear()
|
Remove all logger functions from a given level.
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L100-L104
| null |
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def add(self, levels, logger):
""" Given a list or tuple of logging levels,
add a logger instance to each. """
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
def remove(self, level, logger):
""" Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way."""
self.config[level].discard(logger)
logger.close()
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
""" Create a 'record' (a dictionary) with information to be logged. """
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
def log(self, *args, **kwargs):
""" Do logging, but handle error suppression. """
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
def multi(self, lvl_list, msg, *args, **kwargs):
""" Log a message at multiple levels"""
for level in lvl_list:
self.log(level, msg, args, **kwargs)
def all(self, msg, *args, **kwargs):
""" Log a message at every known log level """
self.multi(ALL, msg, args, **kwargs)
def _find_caller(self):
"""
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
"""
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
|
peterldowns/lggr
|
lggr/__init__.py
|
Lggr._make_record
|
python
|
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
|
Create a 'record' (a dictionary) with information to be logged.
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L106-L199
| null |
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def add(self, levels, logger):
""" Given a list or tuple of logging levels,
add a logger instance to each. """
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
def remove(self, level, logger):
""" Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way."""
self.config[level].discard(logger)
logger.close()
def clear(self, level):
""" Remove all logger functions from a given level. """
for item in self.config[level]:
item.close()
self.config[level].clear()
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
def log(self, *args, **kwargs):
""" Do logging, but handle error suppression. """
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
def multi(self, lvl_list, msg, *args, **kwargs):
""" Log a message at multiple levels"""
for level in lvl_list:
self.log(level, msg, args, **kwargs)
def all(self, msg, *args, **kwargs):
""" Log a message at every known log level """
self.multi(ALL, msg, args, **kwargs)
def _find_caller(self):
"""
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
"""
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
|
peterldowns/lggr
|
lggr/__init__.py
|
Lggr._log
|
python
|
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
|
Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history.
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L201-L234
|
[
"def remove(self, level, logger):\n \"\"\" Given a level, remove a given logger function\n if it is a member of that level, closing the logger\n function either way.\"\"\"\n self.config[level].discard(logger)\n logger.close()\n",
"def _make_record(self,\n level,\n fmt,\n args,\n extra,\n exc_info,\n inc_stackinfo,\n inc_multiproc):\n \"\"\" Create a 'record' (a dictionary) with information to be logged. \"\"\"\n\n fn = fname = '(unknown file)'\n lno = 0\n func = '(unknown function)'\n code = '(code not available)'\n cc = []\n sinfo = None\n module = '(unknown module)'\n if _srcfile and inc_stackinfo:\n #IronPython doesn't track Python frames, so _find_caller throws an\n #exception on some versionf of IronPython. We trap it here so that\n #IronPython can use logging.\n try:\n fn, lno, func, code, cc, sinfo = self._find_caller()\n fname = os.path.basename(fn)\n module = os.path.splitext(fname)[0]\n except ValueError:\n pass\n\n if not exc_info or not isinstance(exc_info, tuple):\n # Allow passed in exc_info, but supply it if it isn't\n exc_info = sys.exc_info()\n\n log_record = { # This is available information for logging functions.\n #TODO: proc_name, thread_name\n # see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279\n 'asctime': time.asctime(), # TODO: actual specifier for format\n 'code': code,\n 'codecontext': ''.join(cc),\n 'excinfo' : exc_info,\n 'filename' : fname,\n 'funcname' : func,\n 'levelname' : level,\n 'levelno' : ALL.index(level),\n 'lineno' : lno,\n 'logmessage' : None,\n 'messagefmt' : fmt,\n 'module' : module,\n 'pathname' : fn,\n 'process' : os.getpid(),\n 'processname' : None,\n 'stackinfo' : sinfo,\n 'threadid' : None,\n 'threadname' : None,\n 'time' : time.time(),\n # The custom `extra` information can only be used to format the\n # default format. The `logmessage` can only be passed a dictionary\n # or a list (as `args`).\n 'defaultfmt' : self.config['defaultfmt']\n }\n # If the user passed a single dict, use that with format. If we're\n # passed a tuple or list, dereference its contents as args to format,\n # too. 
Otherwise, leave the log message as None.\n if args:\n if (isinstance(args, (tuple, list)) and\n len(args) == 1 and\n isinstance(args[0], dict)):\n log_record['logmessage'] = fmt.format(**args[0])\n else:\n log_record['logmessage'] = fmt.format(*args)\n else:\n log_record['logmessage'] = fmt\n\n if extra:\n log_record.update(extra) # add custom variables to record\n\n if threading: # check to use threading\n curthread = threading.current_thread()\n log_record.update({\n 'threadid' : curthread.ident,\n 'threadname' : curthread.name\n })\n\n if not inc_multiproc: # check to use multiprocessing\n procname = None\n else:\n procname = 'MainProcess'\n if mp:\n try:\n procname = mp.curent_process().name\n except StandardError:\n pass\n log_record['processname'] = procname\n\n return log_record\n",
"def info(self, msg, *args, **kwargs):\n \"\"\"' Log a message with INFO level \"\"\"\n self.log(INFO, msg, args, **kwargs)\n"
] |
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def add(self, levels, logger):
""" Given a list or tuple of logging levels,
add a logger instance to each. """
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
def remove(self, level, logger):
""" Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way."""
self.config[level].discard(logger)
logger.close()
def clear(self, level):
""" Remove all logger functions from a given level. """
for item in self.config[level]:
item.close()
self.config[level].clear()
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
""" Create a 'record' (a dictionary) with information to be logged. """
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
def log(self, *args, **kwargs):
""" Do logging, but handle error suppression. """
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
def multi(self, lvl_list, msg, *args, **kwargs):
""" Log a message at multiple levels"""
for level in lvl_list:
self.log(level, msg, args, **kwargs)
def all(self, msg, *args, **kwargs):
""" Log a message at every known log level """
self.multi(ALL, msg, args, **kwargs)
def _find_caller(self):
"""
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
"""
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
|
peterldowns/lggr
|
lggr/__init__.py
|
Lggr.log
|
python
|
def log(self, *args, **kwargs):
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
|
Do logging, but handle error suppression.
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L236-L246
|
[
"def _log(self,\n level,\n fmt,\n args=None,\n extra=None,\n exc_info=None,\n inc_stackinfo=False,\n inc_multiproc=False):\n \"\"\" Send a log message to all of the logging functions\n for a given level as well as adding the\n message to this logger instance's history. \"\"\"\n if not self.enabled:\n return # Fail silently so that logging can easily be removed\n\n log_record = self._make_record(\n level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)\n\n logstr = log_record['defaultfmt'].format(**log_record) #whoah.\n\n if self.keep_history:\n self.history.append(logstr)\n\n log_funcs = self.config[level]\n to_remove = []\n for lf in log_funcs:\n try:\n lf.send(logstr)\n except StopIteration:\n # in the case that the log function is already closed, add it\n # to the list of functions to be deleted.\n to_remove.append(lf)\n for lf in to_remove:\n self.remove(level, lf)\n self.info('Logging function {} removed from level {}', lf, level)\n"
] |
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def add(self, levels, logger):
""" Given a list or tuple of logging levels,
add a logger instance to each. """
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
def remove(self, level, logger):
""" Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way."""
self.config[level].discard(logger)
logger.close()
def clear(self, level):
""" Remove all logger functions from a given level. """
for item in self.config[level]:
item.close()
self.config[level].clear()
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
""" Create a 'record' (a dictionary) with information to be logged. """
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
def multi(self, lvl_list, msg, *args, **kwargs):
""" Log a message at multiple levels"""
for level in lvl_list:
self.log(level, msg, args, **kwargs)
def all(self, msg, *args, **kwargs):
""" Log a message at every known log level """
self.multi(ALL, msg, args, **kwargs)
def _find_caller(self):
"""
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
"""
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
|
peterldowns/lggr
|
lggr/__init__.py
|
Lggr.debug
|
python
|
def debug(self, msg, *args, **kwargs):
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
|
Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included.
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L257-L261
|
[
"def log(self, *args, **kwargs):\n \"\"\" Do logging, but handle error suppression. \"\"\"\n if self.suppress_errors:\n try:\n self._log(*args, **kwargs)\n return True\n except:\n return False\n else:\n self._log(*args, **kwargs)\n return True\n"
] |
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def add(self, levels, logger):
""" Given a list or tuple of logging levels,
add a logger instance to each. """
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
def remove(self, level, logger):
""" Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way."""
self.config[level].discard(logger)
logger.close()
def clear(self, level):
""" Remove all logger functions from a given level. """
for item in self.config[level]:
item.close()
self.config[level].clear()
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
""" Create a 'record' (a dictionary) with information to be logged. """
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
def log(self, *args, **kwargs):
""" Do logging, but handle error suppression. """
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
def multi(self, lvl_list, msg, *args, **kwargs):
""" Log a message at multiple levels"""
for level in lvl_list:
self.log(level, msg, args, **kwargs)
def all(self, msg, *args, **kwargs):
""" Log a message at every known log level """
self.multi(ALL, msg, args, **kwargs)
def _find_caller(self):
"""
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
"""
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
|
peterldowns/lggr
|
lggr/__init__.py
|
Lggr.error
|
python
|
def error(self, msg, *args, **kwargs):
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
|
Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included.
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L263-L268
|
[
"def log(self, *args, **kwargs):\n \"\"\" Do logging, but handle error suppression. \"\"\"\n if self.suppress_errors:\n try:\n self._log(*args, **kwargs)\n return True\n except:\n return False\n else:\n self._log(*args, **kwargs)\n return True\n"
] |
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def add(self, levels, logger):
""" Given a list or tuple of logging levels,
add a logger instance to each. """
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
def remove(self, level, logger):
""" Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way."""
self.config[level].discard(logger)
logger.close()
def clear(self, level):
""" Remove all logger functions from a given level. """
for item in self.config[level]:
item.close()
self.config[level].clear()
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
""" Create a 'record' (a dictionary) with information to be logged. """
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
def log(self, *args, **kwargs):
""" Do logging, but handle error suppression. """
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
def multi(self, lvl_list, msg, *args, **kwargs):
""" Log a message at multiple levels"""
for level in lvl_list:
self.log(level, msg, args, **kwargs)
def all(self, msg, *args, **kwargs):
""" Log a message at every known log level """
self.multi(ALL, msg, args, **kwargs)
def _find_caller(self):
"""
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
"""
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
|
peterldowns/lggr
|
lggr/__init__.py
|
Lggr.critical
|
python
|
def critical(self, msg, *args, **kwargs):
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
|
Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included.
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L270-L275
|
[
"def log(self, *args, **kwargs):\n \"\"\" Do logging, but handle error suppression. \"\"\"\n if self.suppress_errors:\n try:\n self._log(*args, **kwargs)\n return True\n except:\n return False\n else:\n self._log(*args, **kwargs)\n return True\n"
] |
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def add(self, levels, logger):
""" Given a list or tuple of logging levels,
add a logger instance to each. """
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
def remove(self, level, logger):
""" Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way."""
self.config[level].discard(logger)
logger.close()
def clear(self, level):
""" Remove all logger functions from a given level. """
for item in self.config[level]:
item.close()
self.config[level].clear()
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
""" Create a 'record' (a dictionary) with information to be logged. """
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
def log(self, *args, **kwargs):
""" Do logging, but handle error suppression. """
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
def multi(self, lvl_list, msg, *args, **kwargs):
""" Log a message at multiple levels"""
for level in lvl_list:
self.log(level, msg, args, **kwargs)
def all(self, msg, *args, **kwargs):
""" Log a message at every known log level """
self.multi(ALL, msg, args, **kwargs)
def _find_caller(self):
"""
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
"""
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
|
peterldowns/lggr
|
lggr/__init__.py
|
Lggr.multi
|
python
|
def multi(self, lvl_list, msg, *args, **kwargs):
for level in lvl_list:
self.log(level, msg, args, **kwargs)
|
Log a message at multiple levels
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L277-L280
|
[
"def log(self, *args, **kwargs):\n \"\"\" Do logging, but handle error suppression. \"\"\"\n if self.suppress_errors:\n try:\n self._log(*args, **kwargs)\n return True\n except:\n return False\n else:\n self._log(*args, **kwargs)\n return True\n"
] |
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def add(self, levels, logger):
""" Given a list or tuple of logging levels,
add a logger instance to each. """
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
def remove(self, level, logger):
""" Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way."""
self.config[level].discard(logger)
logger.close()
def clear(self, level):
""" Remove all logger functions from a given level. """
for item in self.config[level]:
item.close()
self.config[level].clear()
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
""" Create a 'record' (a dictionary) with information to be logged. """
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
def log(self, *args, **kwargs):
""" Do logging, but handle error suppression. """
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
def all(self, msg, *args, **kwargs):
""" Log a message at every known log level """
self.multi(ALL, msg, args, **kwargs)
def _find_caller(self):
"""
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
"""
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
|
peterldowns/lggr
|
lggr/__init__.py
|
Lggr.all
|
python
|
def all(self, msg, *args, **kwargs):
self.multi(ALL, msg, args, **kwargs)
|
Log a message at every known log level
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L282-L284
|
[
"def multi(self, lvl_list, msg, *args, **kwargs):\n \"\"\" Log a message at multiple levels\"\"\"\n for level in lvl_list:\n self.log(level, msg, args, **kwargs)\n"
] |
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def add(self, levels, logger):
""" Given a list or tuple of logging levels,
add a logger instance to each. """
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
def remove(self, level, logger):
""" Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way."""
self.config[level].discard(logger)
logger.close()
def clear(self, level):
""" Remove all logger functions from a given level. """
for item in self.config[level]:
item.close()
self.config[level].clear()
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
""" Create a 'record' (a dictionary) with information to be logged. """
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
def log(self, *args, **kwargs):
""" Do logging, but handle error suppression. """
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
def multi(self, lvl_list, msg, *args, **kwargs):
""" Log a message at multiple levels"""
for level in lvl_list:
self.log(level, msg, args, **kwargs)
def _find_caller(self):
"""
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
"""
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
|
peterldowns/lggr
|
lggr/__init__.py
|
Lggr._find_caller
|
python
|
def _find_caller(self):
rv = ('(unknown file)',
0,
'(unknown function)',
'(code not available)',
[],
None)
f = inspect.currentframe()
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# When lggr is imported as a module, the `_src_file` filename ends
# in '.pyc', while the filename grabbed from inspect will end in
# '.py'. We use splitext here to compare absolute paths without the
# extension, which restores the intended behavior of dropping down
# the callstack until we reach the first file not part of this
# library.
if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
f = f.f_back # get out of this logging file
continue
sinfo = traceback.extract_stack(f)
fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
# Mark the calling line with a >
cc = map(lambda info: ('> ' if info[0] == i else '| ') + info[1],
enumerate(cc))
code = '>' + cc[i]
rv = (fname, lno, fnc, code, cc, sinfo)
break
return rv
|
Find the stack frame of the caller so that we can note the source file
name, line number, and function name.
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L286-L319
| null |
class Lggr():
""" Simplified logging. Dispatches messages to any type of logging function
you want to write, all it has to support is send() and close(). """
def __init__(self,
defaultfmt=None,
keep_history=False,
suppress_errors=True):
self.defaultfmt = defaultfmt or '{asctime} ({levelname}) {logmessage}'
self.config = {
# Different levels of logger functions.
CRITICAL: set(),
ERROR: set(),
DEBUG: set(),
WARNING: set(),
INFO: set(),
# Allow lggrname.defaultfmt act as a shortcut.
'defaultfmt': self.defaultfmt
}
self.history = []
self.enabled = True
self.keep_history = keep_history
self.suppress_errors = suppress_errors
# allow instance.LEVEL instead of lggr.LEVEL
self.ALL = ALL
self.DEBUG = DEBUG
self.INFO = INFO
self.WARNING = WARNING
self.ERROR = ERROR
self.CRITICAL = CRITICAL
def disable(self):
""" Turn off logging. """
self.enabled = False
def enable(self):
""" Turn on logging. Enabled by default. """
self.enabled = True
def close(self):
""" Stop and remove all logging functions
and disable this logger. """
for level in ALL:
self.clear(level)
self.disable()
def add(self, levels, logger):
""" Given a list or tuple of logging levels,
add a logger instance to each. """
if isinstance(levels, (list, tuple)):
for lvl in levels:
self.config[lvl].add(logger)
else:
self.config[levels].add(logger)
def remove(self, level, logger):
""" Given a level, remove a given logger function
if it is a member of that level, closing the logger
function either way."""
self.config[level].discard(logger)
logger.close()
def clear(self, level):
""" Remove all logger functions from a given level. """
for item in self.config[level]:
item.close()
self.config[level].clear()
def _make_record(self,
level,
fmt,
args,
extra,
exc_info,
inc_stackinfo,
inc_multiproc):
""" Create a 'record' (a dictionary) with information to be logged. """
fn = fname = '(unknown file)'
lno = 0
func = '(unknown function)'
code = '(code not available)'
cc = []
sinfo = None
module = '(unknown module)'
if _srcfile and inc_stackinfo:
#IronPython doesn't track Python frames, so _find_caller throws an
#exception on some versionf of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, code, cc, sinfo = self._find_caller()
fname = os.path.basename(fn)
module = os.path.splitext(fname)[0]
except ValueError:
pass
if not exc_info or not isinstance(exc_info, tuple):
# Allow passed in exc_info, but supply it if it isn't
exc_info = sys.exc_info()
log_record = { # This is available information for logging functions.
#TODO: proc_name, thread_name
# see http://hg.python.org/cpython/file/74fa415dc715/Lib/logging/__init__.py#l279
'asctime': time.asctime(), # TODO: actual specifier for format
'code': code,
'codecontext': ''.join(cc),
'excinfo' : exc_info,
'filename' : fname,
'funcname' : func,
'levelname' : level,
'levelno' : ALL.index(level),
'lineno' : lno,
'logmessage' : None,
'messagefmt' : fmt,
'module' : module,
'pathname' : fn,
'process' : os.getpid(),
'processname' : None,
'stackinfo' : sinfo,
'threadid' : None,
'threadname' : None,
'time' : time.time(),
# The custom `extra` information can only be used to format the
# default format. The `logmessage` can only be passed a dictionary
# or a list (as `args`).
'defaultfmt' : self.config['defaultfmt']
}
# If the user passed a single dict, use that with format. If we're
# passed a tuple or list, dereference its contents as args to format,
# too. Otherwise, leave the log message as None.
if args:
if (isinstance(args, (tuple, list)) and
len(args) == 1 and
isinstance(args[0], dict)):
log_record['logmessage'] = fmt.format(**args[0])
else:
log_record['logmessage'] = fmt.format(*args)
else:
log_record['logmessage'] = fmt
if extra:
log_record.update(extra) # add custom variables to record
if threading: # check to use threading
curthread = threading.current_thread()
log_record.update({
'threadid' : curthread.ident,
'threadname' : curthread.name
})
if not inc_multiproc: # check to use multiprocessing
procname = None
else:
procname = 'MainProcess'
if mp:
try:
procname = mp.curent_process().name
except StandardError:
pass
log_record['processname'] = procname
return log_record
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level)
def log(self, *args, **kwargs):
""" Do logging, but handle error suppression. """
if self.suppress_errors:
try:
self._log(*args, **kwargs)
return True
except:
return False
else:
self._log(*args, **kwargs)
return True
#debug, info, warning, error, critical
def info(self, msg, *args, **kwargs):
"""' Log a message with INFO level """
self.log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" Log a message with WARNING level """
self.log(WARNING, msg, args, **kwargs)
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs)
def error(self, msg, *args, **kwargs):
""" Log a message with ERROR level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(ERROR, msg, args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" Log a message with CRITICAL level. Automatically includes stack and
process info unless they are specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
kwargs.setdefault('inc_multiproc', True)
self.log(CRITICAL, msg, args, **kwargs)
def multi(self, lvl_list, msg, *args, **kwargs):
""" Log a message at multiple levels"""
for level in lvl_list:
self.log(level, msg, args, **kwargs)
def all(self, msg, *args, **kwargs):
""" Log a message at every known log level """
self.multi(ALL, msg, args, **kwargs)
|
peterldowns/lggr
|
lggr/coroutine.py
|
coroutine
|
python
|
def coroutine(func):
def wrapper(*args, **kwargs):
c = func(*args, **kwargs)
c.next() # prime it for iteration
return c
return wrapper
|
Decorator for priming co-routines that use (yield)
|
train
|
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/coroutine.py#L8-L14
| null |
# coding: utf-8
import sys
import time
import Queue
import threading
import multiprocessing
class CoroutineProcess(multiprocessing.Process):
""" Will run a coroutine in its own process, using the multiprocessing
library. The coroutine thread runs as a daemon, and is closed automatically
when it is no longer needed. Because it exposes send and close methods, a
CoroutineProcess wrapped coroutine can be dropped in for a regular
coroutine."""
def __init__(self, target_func):
multiprocessing.Process.__init__(self)
self.in_queue = multiprocessing.Queue()
self.processor = target_func
self.daemon = True
# Allows the thread to close correctly
self.shutdown = multiprocessing.Event()
def send(self, item):
if self.shutdown.is_set():
raise StopIteration
self.in_queue.put(item)
def __call__(self, *args, **kwargs):
# Prime the wrapped coroutine.
self.processor = self.processor(*args, **kwargs)
self.processor.next()
self.start()
return self
def run(self): # this is the isolated 'process' being run after start() is called
try:
while True:
item = self.in_queue.get()
self.processor.send(item) # throws StopIteration if close() has been called
except StopIteration:
pass
self.close()
def close(self):
self.processor.close()
self.shutdown.set()
def coroutine_process(func):
def wrapper(*args, **kwargs):
cp = CoroutineProcess(func)
cp = cp(*args, **kwargs)
# XXX(todo): use @CoroutineProcess on an individual function, then wrap
# with @coroutine, too. Don't start until .next().
return cp
return wrapper
class CoroutineThread(threading.Thread):
""" Wrapper for coroutines; runs in their own threads. """
def __init__(self, target_func):
threading.Thread.__init__(self) # creates a thread
self.setDaemon(True)
self.in_queue = Queue.Queue() # creates a queue for cross-thread communication
self.processor = target_func # the function to process incoming data
self.shutdown = threading.Event() # watch for close
def send(self, item):
if self.shutdown.isSet():
raise StopIteration
self.in_queue.put(item)
def __call__(self, *args, **kwargs):
# Prime the wrapped coroutine.
self.processor = self.processor(*args, **kwargs)
self.processor.next()
self.start()
return self
def run(self): # this is running in its own thread after it is created
try:
while True:
item = self.in_queue.get()
if self.shutdown.is_set(): break
self.processor.send(item)
except StopIteration:
pass
self.shutdown.set()
def close(self):
self.shutdown.set()
def coroutine_thread(func):
def wrapper(*args, **kwargs):
cp = CoroutineThread(func)
cp = cp(*args, **kwargs)
# XXX(todo): use @CoroutineProcess on an individual function, then wrap
# with @coroutine, too. Don't start until .next().
return cp
return wrapper
|
erikvw/django-collect-offline
|
django_collect_offline/transaction/transaction_deserializer.py
|
save
|
python
|
def save(obj=None, m2m_data=None):
m2m_data = {} if m2m_data is None else m2m_data
obj.save_base(raw=True)
for attr, values in m2m_data.items():
for value in values:
getattr(obj, attr).add(value)
|
Saves a deserialized model object.
Uses save_base to avoid running code in model.save() and
to avoid triggering signals (if raw=True).
|
train
|
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/transaction/transaction_deserializer.py#L16-L26
| null |
import socket
from django.apps import apps as django_apps
from django_crypto_fields.constants import LOCAL_MODE
from django_crypto_fields.cryptor import Cryptor
from edc_device.constants import NODE_SERVER, CENTRAL_SERVER
from ..constants import DELETE
from .deserialize import deserialize
class TransactionDeserializerError(Exception):
pass
def aes_decrypt(cipher_text):
return Cryptor().aes_decrypt(cipher_text, LOCAL_MODE)
class TransactionDeserializer:
def __init__(self, using=None, allow_self=None, override_role=None, **kwargs):
app_config = django_apps.get_app_config("edc_device")
self.aes_decrypt = aes_decrypt
self.deserialize = deserialize
self.save = save
self.allow_self = allow_self
self.using = using
if not app_config.is_server:
if override_role not in [NODE_SERVER, CENTRAL_SERVER]:
raise TransactionDeserializerError(
"Transactions may only be deserialized on a server. "
f"Got override_role={override_role}, device={app_config.device_id}, "
f"device_role={app_config.device_role}."
)
def deserialize_transactions(self, transactions=None, deserialize_only=None):
"""Deserializes the encrypted serialized model
instances, tx, in a queryset of transactions.
Note: each transaction instance contains encrypted JSON text
that represents just ONE model instance.
"""
if (
not self.allow_self
and transactions.filter(producer=socket.gethostname()).exists()
):
raise TransactionDeserializerError(
f"Not deserializing own transactions. Got "
f"allow_self=False, hostname={socket.gethostname()}"
)
for transaction in transactions:
json_text = self.aes_decrypt(cipher_text=transaction.tx)
json_text = self.custom_parser(json_text)
deserialized = next(self.deserialize(json_text=json_text))
if not deserialize_only:
if transaction.action == DELETE:
deserialized.object.delete()
else:
self.save(obj=deserialized.object, m2m_data=deserialized.m2m_data)
transaction.is_consumed = True
transaction.save()
def custom_parser(self, json_text=None):
"""Runs json_text thru custom parsers.
"""
app_config = django_apps.get_app_config("django_collect_offline")
for json_parser in app_config.custom_json_parsers:
json_text = json_parser(json_text)
return json_text
|
erikvw/django-collect-offline
|
django_collect_offline/transaction/transaction_deserializer.py
|
TransactionDeserializer.deserialize_transactions
|
python
|
def deserialize_transactions(self, transactions=None, deserialize_only=None):
if (
not self.allow_self
and transactions.filter(producer=socket.gethostname()).exists()
):
raise TransactionDeserializerError(
f"Not deserializing own transactions. Got "
f"allow_self=False, hostname={socket.gethostname()}"
)
for transaction in transactions:
json_text = self.aes_decrypt(cipher_text=transaction.tx)
json_text = self.custom_parser(json_text)
deserialized = next(self.deserialize(json_text=json_text))
if not deserialize_only:
if transaction.action == DELETE:
deserialized.object.delete()
else:
self.save(obj=deserialized.object, m2m_data=deserialized.m2m_data)
transaction.is_consumed = True
transaction.save()
|
Deserializes the encrypted serialized model
instances, tx, in a queryset of transactions.
Note: each transaction instance contains encrypted JSON text
that represents just ONE model instance.
|
train
|
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/transaction/transaction_deserializer.py#L49-L75
|
[
"def save(obj=None, m2m_data=None):\n \"\"\"Saves a deserialized model object.\n\n Uses save_base to avoid running code in model.save() and\n to avoid triggering signals (if raw=True).\n \"\"\"\n m2m_data = {} if m2m_data is None else m2m_data\n obj.save_base(raw=True)\n for attr, values in m2m_data.items():\n for value in values:\n getattr(obj, attr).add(value)\n",
"def deserialize(json_text=None):\n \"\"\"Returns a generator of deserialized objects.\n\n Wraps django deserialize with defaults for JSON\n and natural keys.\n\n See https://docs.djangoproject.com/en/2.1/topics/serialization/\n \"\"\"\n\n return serializers.deserialize(\n \"json\",\n json_text,\n ensure_ascii=True,\n use_natural_foreign_keys=True,\n use_natural_primary_keys=False,\n )\n",
"def aes_decrypt(cipher_text):\n return Cryptor().aes_decrypt(cipher_text, LOCAL_MODE)\n",
"def custom_parser(self, json_text=None):\n \"\"\"Runs json_text thru custom parsers.\n \"\"\"\n app_config = django_apps.get_app_config(\"django_collect_offline\")\n for json_parser in app_config.custom_json_parsers:\n json_text = json_parser(json_text)\n return json_text\n"
] |
class TransactionDeserializer:
def __init__(self, using=None, allow_self=None, override_role=None, **kwargs):
app_config = django_apps.get_app_config("edc_device")
self.aes_decrypt = aes_decrypt
self.deserialize = deserialize
self.save = save
self.allow_self = allow_self
self.using = using
if not app_config.is_server:
if override_role not in [NODE_SERVER, CENTRAL_SERVER]:
raise TransactionDeserializerError(
"Transactions may only be deserialized on a server. "
f"Got override_role={override_role}, device={app_config.device_id}, "
f"device_role={app_config.device_role}."
)
def custom_parser(self, json_text=None):
"""Runs json_text thru custom parsers.
"""
app_config = django_apps.get_app_config("django_collect_offline")
for json_parser in app_config.custom_json_parsers:
json_text = json_parser(json_text)
return json_text
|
erikvw/django-collect-offline
|
django_collect_offline/transaction/transaction_deserializer.py
|
TransactionDeserializer.custom_parser
|
python
|
def custom_parser(self, json_text=None):
app_config = django_apps.get_app_config("django_collect_offline")
for json_parser in app_config.custom_json_parsers:
json_text = json_parser(json_text)
return json_text
|
Runs json_text thru custom parsers.
|
train
|
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/transaction/transaction_deserializer.py#L77-L83
| null |
class TransactionDeserializer:
def __init__(self, using=None, allow_self=None, override_role=None, **kwargs):
app_config = django_apps.get_app_config("edc_device")
self.aes_decrypt = aes_decrypt
self.deserialize = deserialize
self.save = save
self.allow_self = allow_self
self.using = using
if not app_config.is_server:
if override_role not in [NODE_SERVER, CENTRAL_SERVER]:
raise TransactionDeserializerError(
"Transactions may only be deserialized on a server. "
f"Got override_role={override_role}, device={app_config.device_id}, "
f"device_role={app_config.device_role}."
)
def deserialize_transactions(self, transactions=None, deserialize_only=None):
"""Deserializes the encrypted serialized model
instances, tx, in a queryset of transactions.
Note: each transaction instance contains encrypted JSON text
that represents just ONE model instance.
"""
if (
not self.allow_self
and transactions.filter(producer=socket.gethostname()).exists()
):
raise TransactionDeserializerError(
f"Not deserializing own transactions. Got "
f"allow_self=False, hostname={socket.gethostname()}"
)
for transaction in transactions:
json_text = self.aes_decrypt(cipher_text=transaction.tx)
json_text = self.custom_parser(json_text)
deserialized = next(self.deserialize(json_text=json_text))
if not deserialize_only:
if transaction.action == DELETE:
deserialized.object.delete()
else:
self.save(obj=deserialized.object, m2m_data=deserialized.m2m_data)
transaction.is_consumed = True
transaction.save()
|
erikvw/django-collect-offline
|
django_collect_offline/site_offline_models.py
|
SiteOfflineModels.register
|
python
|
def register(self, models=None, wrapper_cls=None):
self.loaded = True
for model in models:
model = model.lower()
if model not in self.registry:
self.registry.update({model: wrapper_cls or self.wrapper_cls})
if self.register_historical:
historical_model = ".historical".join(model.split("."))
self.registry.update(
{historical_model: wrapper_cls or self.wrapper_cls}
)
else:
raise AlreadyRegistered(f"Model is already registered. Got {model}.")
|
Registers with app_label.modelname, wrapper_cls.
|
train
|
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/site_offline_models.py#L28-L42
| null |
class SiteOfflineModels:
module_name = "offline_models"
wrapper_cls = OfflineModel
register_historical = True
def __init__(self):
self.registry = {}
self.loaded = False
def register_for_app(
self, app_label=None, exclude_models=None, exclude_model_classes=None
):
"""Registers all models for this app_label.
"""
models = []
exclude_models = exclude_models or []
app_config = django_apps.get_app_config(app_label)
for model in app_config.get_models():
if model._meta.label_lower in exclude_models:
pass
elif exclude_model_classes and issubclass(model, exclude_model_classes):
pass
else:
models.append(model._meta.label_lower)
self.register(models)
def get_wrapped_instance(self, instance=None):
"""Returns a wrapped model instance.
"""
if instance._meta.label_lower not in self.registry:
raise ModelNotRegistered(f"{repr(instance)} is not registered with {self}.")
wrapper_cls = self.registry.get(instance._meta.label_lower) or self.wrapper_cls
if wrapper_cls:
return wrapper_cls(instance)
return instance
def site_models(self, app_label=None):
"""Returns a dictionary of registered models.
"""
site_models = {}
app_configs = (
django_apps.get_app_configs()
if app_label is None
else [django_apps.get_app_config(app_label)]
)
for app_config in app_configs:
model_list = [
model
for model in app_config.get_models()
if model._meta.label_lower in self.registry
]
if model_list:
model_list.sort(key=lambda m: m._meta.verbose_name)
site_models.update({app_config.name: model_list})
return site_models
def autodiscover(self, module_name=None):
module_name = module_name or self.module_name
sys.stdout.write(" * checking for models to register ...\n")
for app in django_apps.app_configs:
try:
mod = import_module(app)
try:
before_import_registry = copy.deepcopy(self.registry)
import_module(f"{app}.{module_name}")
sys.stdout.write(f" * registered models from '{app}'.\n")
except Exception as e:
if f"No module named '{app}.{module_name}'" not in str(e):
raise
self.registry = before_import_registry
if module_has_submodule(mod, module_name):
raise
except ImportError:
pass
|
erikvw/django-collect-offline
|
django_collect_offline/site_offline_models.py
|
SiteOfflineModels.register_for_app
|
python
|
def register_for_app(
self, app_label=None, exclude_models=None, exclude_model_classes=None
):
models = []
exclude_models = exclude_models or []
app_config = django_apps.get_app_config(app_label)
for model in app_config.get_models():
if model._meta.label_lower in exclude_models:
pass
elif exclude_model_classes and issubclass(model, exclude_model_classes):
pass
else:
models.append(model._meta.label_lower)
self.register(models)
|
Registers all models for this app_label.
|
train
|
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/site_offline_models.py#L44-L59
|
[
"def register(self, models=None, wrapper_cls=None):\n \"\"\"Registers with app_label.modelname, wrapper_cls.\n \"\"\"\n self.loaded = True\n for model in models:\n model = model.lower()\n if model not in self.registry:\n self.registry.update({model: wrapper_cls or self.wrapper_cls})\n if self.register_historical:\n historical_model = \".historical\".join(model.split(\".\"))\n self.registry.update(\n {historical_model: wrapper_cls or self.wrapper_cls}\n )\n else:\n raise AlreadyRegistered(f\"Model is already registered. Got {model}.\")\n"
] |
class SiteOfflineModels:
module_name = "offline_models"
wrapper_cls = OfflineModel
register_historical = True
def __init__(self):
self.registry = {}
self.loaded = False
def register(self, models=None, wrapper_cls=None):
"""Registers with app_label.modelname, wrapper_cls.
"""
self.loaded = True
for model in models:
model = model.lower()
if model not in self.registry:
self.registry.update({model: wrapper_cls or self.wrapper_cls})
if self.register_historical:
historical_model = ".historical".join(model.split("."))
self.registry.update(
{historical_model: wrapper_cls or self.wrapper_cls}
)
else:
raise AlreadyRegistered(f"Model is already registered. Got {model}.")
def get_wrapped_instance(self, instance=None):
"""Returns a wrapped model instance.
"""
if instance._meta.label_lower not in self.registry:
raise ModelNotRegistered(f"{repr(instance)} is not registered with {self}.")
wrapper_cls = self.registry.get(instance._meta.label_lower) or self.wrapper_cls
if wrapper_cls:
return wrapper_cls(instance)
return instance
def site_models(self, app_label=None):
"""Returns a dictionary of registered models.
"""
site_models = {}
app_configs = (
django_apps.get_app_configs()
if app_label is None
else [django_apps.get_app_config(app_label)]
)
for app_config in app_configs:
model_list = [
model
for model in app_config.get_models()
if model._meta.label_lower in self.registry
]
if model_list:
model_list.sort(key=lambda m: m._meta.verbose_name)
site_models.update({app_config.name: model_list})
return site_models
def autodiscover(self, module_name=None):
module_name = module_name or self.module_name
sys.stdout.write(" * checking for models to register ...\n")
for app in django_apps.app_configs:
try:
mod = import_module(app)
try:
before_import_registry = copy.deepcopy(self.registry)
import_module(f"{app}.{module_name}")
sys.stdout.write(f" * registered models from '{app}'.\n")
except Exception as e:
if f"No module named '{app}.{module_name}'" not in str(e):
raise
self.registry = before_import_registry
if module_has_submodule(mod, module_name):
raise
except ImportError:
pass
|
erikvw/django-collect-offline
|
django_collect_offline/site_offline_models.py
|
SiteOfflineModels.get_wrapped_instance
|
python
|
def get_wrapped_instance(self, instance=None):
if instance._meta.label_lower not in self.registry:
raise ModelNotRegistered(f"{repr(instance)} is not registered with {self}.")
wrapper_cls = self.registry.get(instance._meta.label_lower) or self.wrapper_cls
if wrapper_cls:
return wrapper_cls(instance)
return instance
|
Returns a wrapped model instance.
|
train
|
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/site_offline_models.py#L61-L69
| null |
class SiteOfflineModels:
module_name = "offline_models"
wrapper_cls = OfflineModel
register_historical = True
def __init__(self):
self.registry = {}
self.loaded = False
def register(self, models=None, wrapper_cls=None):
"""Registers with app_label.modelname, wrapper_cls.
"""
self.loaded = True
for model in models:
model = model.lower()
if model not in self.registry:
self.registry.update({model: wrapper_cls or self.wrapper_cls})
if self.register_historical:
historical_model = ".historical".join(model.split("."))
self.registry.update(
{historical_model: wrapper_cls or self.wrapper_cls}
)
else:
raise AlreadyRegistered(f"Model is already registered. Got {model}.")
def register_for_app(
self, app_label=None, exclude_models=None, exclude_model_classes=None
):
"""Registers all models for this app_label.
"""
models = []
exclude_models = exclude_models or []
app_config = django_apps.get_app_config(app_label)
for model in app_config.get_models():
if model._meta.label_lower in exclude_models:
pass
elif exclude_model_classes and issubclass(model, exclude_model_classes):
pass
else:
models.append(model._meta.label_lower)
self.register(models)
def site_models(self, app_label=None):
"""Returns a dictionary of registered models.
"""
site_models = {}
app_configs = (
django_apps.get_app_configs()
if app_label is None
else [django_apps.get_app_config(app_label)]
)
for app_config in app_configs:
model_list = [
model
for model in app_config.get_models()
if model._meta.label_lower in self.registry
]
if model_list:
model_list.sort(key=lambda m: m._meta.verbose_name)
site_models.update({app_config.name: model_list})
return site_models
def autodiscover(self, module_name=None):
module_name = module_name or self.module_name
sys.stdout.write(" * checking for models to register ...\n")
for app in django_apps.app_configs:
try:
mod = import_module(app)
try:
before_import_registry = copy.deepcopy(self.registry)
import_module(f"{app}.{module_name}")
sys.stdout.write(f" * registered models from '{app}'.\n")
except Exception as e:
if f"No module named '{app}.{module_name}'" not in str(e):
raise
self.registry = before_import_registry
if module_has_submodule(mod, module_name):
raise
except ImportError:
pass
|
erikvw/django-collect-offline
|
django_collect_offline/site_offline_models.py
|
SiteOfflineModels.site_models
|
python
|
def site_models(self, app_label=None):
site_models = {}
app_configs = (
django_apps.get_app_configs()
if app_label is None
else [django_apps.get_app_config(app_label)]
)
for app_config in app_configs:
model_list = [
model
for model in app_config.get_models()
if model._meta.label_lower in self.registry
]
if model_list:
model_list.sort(key=lambda m: m._meta.verbose_name)
site_models.update({app_config.name: model_list})
return site_models
|
Returns a dictionary of registered models.
|
train
|
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/site_offline_models.py#L71-L89
| null |
class SiteOfflineModels:
module_name = "offline_models"
wrapper_cls = OfflineModel
register_historical = True
def __init__(self):
self.registry = {}
self.loaded = False
def register(self, models=None, wrapper_cls=None):
"""Registers with app_label.modelname, wrapper_cls.
"""
self.loaded = True
for model in models:
model = model.lower()
if model not in self.registry:
self.registry.update({model: wrapper_cls or self.wrapper_cls})
if self.register_historical:
historical_model = ".historical".join(model.split("."))
self.registry.update(
{historical_model: wrapper_cls or self.wrapper_cls}
)
else:
raise AlreadyRegistered(f"Model is already registered. Got {model}.")
def register_for_app(
self, app_label=None, exclude_models=None, exclude_model_classes=None
):
"""Registers all models for this app_label.
"""
models = []
exclude_models = exclude_models or []
app_config = django_apps.get_app_config(app_label)
for model in app_config.get_models():
if model._meta.label_lower in exclude_models:
pass
elif exclude_model_classes and issubclass(model, exclude_model_classes):
pass
else:
models.append(model._meta.label_lower)
self.register(models)
def get_wrapped_instance(self, instance=None):
"""Returns a wrapped model instance.
"""
if instance._meta.label_lower not in self.registry:
raise ModelNotRegistered(f"{repr(instance)} is not registered with {self}.")
wrapper_cls = self.registry.get(instance._meta.label_lower) or self.wrapper_cls
if wrapper_cls:
return wrapper_cls(instance)
return instance
def autodiscover(self, module_name=None):
module_name = module_name or self.module_name
sys.stdout.write(" * checking for models to register ...\n")
for app in django_apps.app_configs:
try:
mod = import_module(app)
try:
before_import_registry = copy.deepcopy(self.registry)
import_module(f"{app}.{module_name}")
sys.stdout.write(f" * registered models from '{app}'.\n")
except Exception as e:
if f"No module named '{app}.{module_name}'" not in str(e):
raise
self.registry = before_import_registry
if module_has_submodule(mod, module_name):
raise
except ImportError:
pass
|
erikvw/django-collect-offline
|
django_collect_offline/offline_model.py
|
OfflineModel.has_offline_historical_manager_or_raise
|
python
|
def has_offline_historical_manager_or_raise(self):
try:
model = self.instance.__class__.history.model
except AttributeError:
model = self.instance.__class__
field = [field for field in model._meta.fields if field.name == "history_id"]
if field and not isinstance(field[0], UUIDField):
raise OfflineHistoricalManagerError(
f"Field 'history_id' of historical model "
f"'{model._meta.app_label}.{model._meta.model_name}' "
"must be an UUIDfield. "
"For history = HistoricalRecords() use edc_model.HistoricalRecords instead of "
"simple_history.HistoricalRecords(). "
f"See '{self.instance._meta.app_label}.{self.instance._meta.model_name}'."
)
|
Raises an exception if model uses a history manager and
historical model history_id is not a UUIDField.
Note: expected to use edc_model.HistoricalRecords instead of
simple_history.HistoricalRecords.
|
train
|
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/offline_model.py#L71-L91
| null |
class OfflineModel:
"""A wrapper for offline model instances to add methods called in
signals for synchronization.
"""
def __init__(self, instance):
try:
self.is_serialized = settings.ALLOW_MODEL_SERIALIZATION
except AttributeError:
self.is_serialized = True
self.instance = instance
self.has_offline_historical_manager_or_raise()
self.has_natural_key_or_raise()
self.has_get_by_natural_key_or_raise()
self.has_uuid_primary_key_or_raise()
def __repr__(self):
return f"{self.__class__.__name__}({repr(self.instance)})"
def __str__(self):
return f"{self.instance._meta.label_lower}"
def has_natural_key_or_raise(self):
try:
self.instance.natural_key
except AttributeError:
raise OfflineNaturalKeyMissing(
f"Model '{self.instance._meta.app_label}.{self.instance._meta.model_name}' "
"is missing method natural_key "
)
def has_get_by_natural_key_or_raise(self):
try:
self.instance.__class__.objects.get_by_natural_key
except AttributeError:
raise OfflineGetByNaturalKeyMissing(
f"Model '{self.instance._meta.app_label}.{self.instance._meta.model_name}' "
"is missing manager method get_by_natural_key "
)
def has_uuid_primary_key_or_raise(self):
if self.primary_key_field.get_internal_type() != "UUIDField":
raise OfflineUuidPrimaryKeyMissing(
f"Expected Model '{self.instance._meta.label_lower}' "
f"primary key {self.primary_key_field} to be a UUIDField "
f"(e.g. AutoUUIDField). "
f"Got {self.primary_key_field.get_internal_type()}."
)
@property
def primary_key_field(self):
"""Return the primary key field.
Is `id` in most cases. Is `history_id` for Historical models.
"""
return [field for field in self.instance._meta.fields if field.primary_key][0]
def to_outgoing_transaction(self, using, created=None, deleted=None):
""" Serialize the model instance to an AES encrypted json object
and saves the json object to the OutgoingTransaction model.
"""
OutgoingTransaction = django_apps.get_model(
"django_collect_offline", "OutgoingTransaction"
)
created = True if created is None else created
action = INSERT if created else UPDATE
timestamp_datetime = (
self.instance.created if created else self.instance.modified
)
if not timestamp_datetime:
timestamp_datetime = get_utcnow()
if deleted:
timestamp_datetime = get_utcnow()
action = DELETE
outgoing_transaction = None
if self.is_serialized:
hostname = socket.gethostname()
outgoing_transaction = OutgoingTransaction.objects.using(using).create(
tx_name=self.instance._meta.label_lower,
tx_pk=getattr(self.instance, self.primary_key_field.name),
tx=self.encrypted_json(),
timestamp=timestamp_datetime.strftime("%Y%m%d%H%M%S%f"),
producer=f"{hostname}-{using}",
action=action,
using=using,
)
return outgoing_transaction
def encrypted_json(self):
"""Returns an encrypted json serialized from self.
"""
json = serialize(objects=[self.instance])
encrypted_json = Cryptor().aes_encrypt(json, LOCAL_MODE)
return encrypted_json
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.