repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
rbit/pydtls | dtls/demux/osnet.py | UDPDemux.get_connection | python | def get_connection(self, address):
if not address:
return self._datagram_socket
# Create a new datagram socket bound to the same interface and port as
# the root socket, but connected to the given peer
conn = socket.socket(self._datagram_socket.family,
self._datagram_socket.type,
self._datagram_socket.proto)
conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
conn.bind(self._datagram_socket.getsockname())
conn.connect(address)
_logger.debug("Created new connection for address: %s", address)
return conn | Create or retrieve a muxed connection
Arguments:
address -- a peer endpoint in IPv4/v6 address format; None refers
to the connection for unknown peers
Return:
a bound, connected datagram socket instance, or the root socket
in case address was None | train | https://github.com/rbit/pydtls/blob/41a71fccd990347d0de5f42418fea1e4e733359c/dtls/demux/osnet.py#L86-L110 | null | class UDPDemux(object):
"""OS network stack configuring demux
This class implements a demux that creates sockets connected to peer
network endpoints, configuring the network stack to demultiplex
incoming datagrams from these endpoints among these sockets.
Methods:
get_connection -- create a new connection or retrieve an existing one
service -- this method does nothing for this type of demux
"""
def __init__(self, datagram_socket):
"""Constructor
Arguments:
datagram_socket -- the root socket; this must be a bound, unconnected
datagram socket
"""
if datagram_socket.type != socket.SOCK_DGRAM:
raise InvalidSocketError("datagram_socket is not of " +
"type SOCK_DGRAM")
try:
datagram_socket.getsockname()
except:
raise InvalidSocketError("datagram_socket is unbound")
try:
datagram_socket.getpeername()
except:
pass
else:
raise InvalidSocketError("datagram_socket is connected")
datagram_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._datagram_socket = datagram_socket
@staticmethod
def service():
"""Service the root socket
This type of demux performs no servicing work on the root socket,
and instead advises the caller to proceed to listening on the root
socket.
"""
return True
|
rbit/pydtls | dtls/openssl.py | SSL_CTX_set_info_callback | python | def SSL_CTX_set_info_callback(ctx, app_info_cb):
def py_info_callback(ssl, where, ret):
try:
app_info_cb(SSL(ssl), where, ret)
except:
pass
return
global _info_callback
_info_callback[ctx] = _rvoid_voidp_int_int(py_info_callback)
_SSL_CTX_set_info_callback(ctx, _info_callback[ctx]) | Set the info callback
:param callback: The Python callback to use
:return: None | train | https://github.com/rbit/pydtls/blob/41a71fccd990347d0de5f42418fea1e4e733359c/dtls/openssl.py#L876-L892 | null | # OpenSSL library wrapper: provide access to both OpenSSL dynamic libraries
# through ctypes.
# Copyright 2012 Ray Brown
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The License is also distributed with this work in the file named "LICENSE."
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenSSL Wrapper
This module provides run-time access to the OpenSSL cryptographic and
protocols libraries. It is designed for use with "from openssl import *". For
this reason, the module variable __all__ contains all of this module's
integer constants, OpenSSL library functions, and wrapper functions.
Constants and functions are not documented here. See the OpenSSL library
documentation.
Exceptions:
OpenSSLError -- exception raised when errors occur in the OpenSSL library
"""
import sys
import array
import socket
from logging import getLogger
from os import path
from datetime import timedelta
from err import openssl_error
from err import SSL_ERROR_NONE
from util import _EC_KEY, _BIO
import ctypes
from ctypes import CDLL
from ctypes import CFUNCTYPE
from ctypes import c_void_p, c_int, c_long, c_uint, c_ulong, c_char_p, c_size_t
from ctypes import c_short, c_ushort, c_ubyte, c_char
from ctypes import byref, POINTER, addressof
from ctypes import Structure, Union
from ctypes import create_string_buffer, sizeof, memmove, cast
#
# Module initialization
#
_logger = getLogger(__name__)
#
# Library loading
#
if sys.platform.startswith('win'):
dll_path = path.abspath(path.dirname(__file__))
cryptodll_path = path.join(dll_path, "libeay32.dll")
ssldll_path = path.join(dll_path, "ssleay32.dll")
libcrypto = CDLL(cryptodll_path)
libssl = CDLL(ssldll_path)
else:
libcrypto = CDLL("libcrypto.so.1.0.0")
libssl = CDLL("libssl.so.1.0.0")
#
# Integer constants - exported
#
BIO_NOCLOSE = 0x00
BIO_CLOSE = 0x01
SSLEAY_VERSION = 0
SSL_OP_NO_QUERY_MTU = 0x00001000
SSL_OP_NO_COMPRESSION = 0x00020000
SSL_VERIFY_NONE = 0x00
SSL_VERIFY_PEER = 0x01
SSL_VERIFY_FAIL_IF_NO_PEER_CERT = 0x02
SSL_VERIFY_CLIENT_ONCE = 0x04
SSL_SESS_CACHE_OFF = 0x0000
SSL_SESS_CACHE_CLIENT = 0x0001
SSL_SESS_CACHE_SERVER = 0x0002
SSL_SESS_CACHE_BOTH = SSL_SESS_CACHE_CLIENT | SSL_SESS_CACHE_SERVER
SSL_SESS_CACHE_NO_AUTO_CLEAR = 0x0080
SSL_SESS_CACHE_NO_INTERNAL_LOOKUP = 0x0100
SSL_SESS_CACHE_NO_INTERNAL_STORE = 0x0200
SSL_SESS_CACHE_NO_INTERNAL = \
SSL_SESS_CACHE_NO_INTERNAL_LOOKUP | SSL_SESS_CACHE_NO_INTERNAL_STORE
SSL_BUILD_CHAIN_FLAG_NONE = 0x0
SSL_BUILD_CHAIN_FLAG_UNTRUSTED = 0x1
SSL_BUILD_CHAIN_FLAG_NO_ROOT = 0x2
SSL_BUILD_CHAIN_FLAG_CHECK = 0x4
SSL_BUILD_CHAIN_FLAG_IGNORE_ERROR = 0x8
SSL_BUILD_CHAIN_FLAG_CLEAR_ERROR = 0x10
SSL_FILE_TYPE_PEM = 1
GEN_DIRNAME = 4
NID_subject_alt_name = 85
CRYPTO_LOCK = 1
SSL_ST_MASK = 0x0FFF
SSL_ST_CONNECT = 0x1000
SSL_ST_ACCEPT = 0x2000
SSL_ST_INIT = (SSL_ST_CONNECT | SSL_ST_ACCEPT)
SSL_ST_BEFORE = 0x4000
SSL_ST_OK = 0x03
SSL_ST_RENEGOTIATE = (0x04 | SSL_ST_INIT)
SSL_ST_ERR = 0x05
SSL_CB_LOOP = 0x01
SSL_CB_EXIT = 0x02
SSL_CB_READ = 0x04
SSL_CB_WRITE = 0x08
SSL_CB_ALERT = 0x4000
SSL_CB_READ_ALERT = (SSL_CB_ALERT | SSL_CB_READ)
SSL_CB_WRITE_ALERT = (SSL_CB_ALERT | SSL_CB_WRITE)
SSL_CB_ACCEPT_LOOP = (SSL_ST_ACCEPT | SSL_CB_LOOP)
SSL_CB_ACCEPT_EXIT = (SSL_ST_ACCEPT | SSL_CB_EXIT)
SSL_CB_CONNECT_LOOP = (SSL_ST_CONNECT | SSL_CB_LOOP)
SSL_CB_CONNECT_EXIT = (SSL_ST_CONNECT | SSL_CB_EXIT)
SSL_CB_HANDSHAKE_START = 0x10
SSL_CB_HANDSHAKE_DONE = 0x20
#
# Integer constants - internal
#
SSL_CTRL_SET_TMP_ECDH = 4
SSL_CTRL_SET_MTU = 17
SSL_CTRL_OPTIONS = 32
SSL_CTRL_SET_READ_AHEAD = 41
SSL_CTRL_SET_SESS_CACHE_MODE = 44
SSL_CTRL_CLEAR_OPTIONS = 77
SSL_CTRL_GET_CURVES = 90
SSL_CTRL_SET_CURVES = 91
SSL_CTRL_SET_CURVES_LIST = 92
SSL_CTRL_GET_SHARED_CURVE = 93
SSL_CTRL_SET_ECDH_AUTO = 94
SSL_CTRL_SET_SIGALGS = 97
SSL_CTRL_SET_SIGALGS_LIST = 98
SSL_CTRL_SET_CLIENT_SIGALGS = 101
SSL_CTRL_SET_CLIENT_SIGALGS_LIST = 102
SSL_CTRL_BUILD_CERT_CHAIN = 105
BIO_CTRL_INFO = 3
BIO_CTRL_DGRAM_SET_CONNECTED = 32
BIO_CTRL_DGRAM_SET_PEER = 44
BIO_CTRL_DGRAM_GET_PEER = 46
BIO_C_SET_NBIO = 102
DTLS_CTRL_GET_TIMEOUT = 73
DTLS_CTRL_HANDLE_TIMEOUT = 74
DTLS_CTRL_LISTEN = 75
DTLS_CTRL_SET_LINK_MTU = 120
X509_NAME_MAXLEN = 256
GETS_MAXLEN = 2048
class _EllipticCurve(object):
    """A named elliptic curve built into the linked OpenSSL library."""

    # Class-level cache of the built-in curve list; populated lazily
    _curves = None

    @classmethod
    def _get_elliptic_curves(cls):
        # Return the cached curve list, loading it from the library on
        # first access
        if cls._curves is None:
            # Load once
            cls._curves = cls._load_elliptic_curves()
        return cls._curves

    @classmethod
    def _load_elliptic_curves(cls):
        # First call with a NULL buffer only returns the curve count
        num_curves = EC_get_builtin_curves(None, 0)
        if num_curves > 0:
            # Second call fills a buffer of EC_builtin_curve records
            builtin_curves = create_string_buffer(sizeof(EC_builtin_curve) * num_curves)
            EC_get_builtin_curves(cast(builtin_curves, POINTER(EC_builtin_curve)), num_curves)
            return [cls(c.nid, OBJ_nid2sn(c.nid)) for c in cast(builtin_curves, POINTER(EC_builtin_curve))[:num_curves]]
        return []

    def __init__(self, nid, name):
        # nid -- OpenSSL numeric identifier for the curve
        # name -- the curve's short name (from OBJ_nid2sn)
        self.nid = nid
        self.name = name

    def __repr__(self):
        return "<Curve %d %r>" % (self.nid, self.name)

    def to_EC_KEY(self):
        # Allocate a fresh EC_KEY for this curve; None when allocation
        # fails (NULL pointer returned)
        key = _EC_KEY(EC_KEY_new_by_curve_name(self.nid))
        return key if bool(key.value) else None
def get_elliptic_curves():
    u"""Return the list of curves built into the linked OpenSSL.

    The list is fetched from the library on first use and cached for
    subsequent calls.

    :rtype: list
    """
    curves = _EllipticCurve._get_elliptic_curves()
    return curves
def get_elliptic_curve(name):
    u"""Return the built-in curve with the given short name.

    :rtype: _EllipticCurve
    :raises ValueError: when no built-in curve has that name
    """
    match = next((c for c in get_elliptic_curves() if c.name == name), None)
    if match is None:
        raise ValueError("unknown curve name", name)
    return match
#
# Parameter data types
#
class c_long_parm(object):
    """Long integer parameter class

    c_long must be distinguishable from c_int, as the latter is associated
    with a default error checking routine, while the former is not.
    """
class FuncParam(object):
    """Wrapper for an OpenSSL pointer passed to or returned from a C call.

    Instances hold the raw pointer in a ctypes c_void_p; from_param lets
    ctypes accept instances directly as arguments while rejecting any
    value that is not an instance of the expected wrapper subclass.
    """

    @classmethod
    def from_param(cls, value):
        # ctypes invokes this to convert an argument; enforce wrapper type
        if isinstance(value, cls):
            return value._as_parameter
        _logger.error("Parameter type mismatch: %s not of type %s",
                      value, cls)
        raise TypeError(repr(value) + " is not of type " + repr(cls))

    def __init__(self, value):
        self._as_parameter = c_void_p(value)

    def __nonzero__(self):
        # Python 2 truth value: False when wrapping a NULL pointer
        return bool(self._as_parameter)

    @property
    def raw(self):
        # Integer value of the wrapped pointer (None when NULL)
        return self._as_parameter.value
class DTLS_Method(FuncParam):
def __init__(self, value):
super(DTLS_Method, self).__init__(value)
class BIO_METHOD(FuncParam):
def __init__(self, value):
super(BIO_METHOD, self).__init__(value)
class SSLCTX(FuncParam):
def __init__(self, value):
super(SSLCTX, self).__init__(value)
class SSL(FuncParam):
def __init__(self, value):
super(SSL, self).__init__(value)
class BIO(FuncParam):
def __init__(self, value):
super(BIO, self).__init__(value)
class EC_KEY(FuncParam):
def __init__(self, value):
super(EC_KEY, self).__init__(value)
class X509(FuncParam):
def __init__(self, value):
super(X509, self).__init__(value)
class X509_val_st(Structure):
_fields_ = [("notBefore", c_void_p),
("notAfter", c_void_p)]
class X509_cinf_st(Structure):
_fields_ = [("version", c_void_p),
("serialNumber", c_void_p),
("signature", c_void_p),
("issuer", c_void_p),
("validity", POINTER(X509_val_st))] # remaining fields omitted
class X509_st(Structure):
_fields_ = [("cert_info", POINTER(X509_cinf_st),)] # remainder omitted
class X509_name_st(Structure):
_fields_ = [("entries", c_void_p)] # remaining fields omitted
class ASN1_OBJECT(FuncParam):
def __init__(self, value):
super(ASN1_OBJECT, self).__init__(value)
class ASN1_STRING(FuncParam):
def __init__(self, value):
super(ASN1_STRING, self).__init__(value)
class ASN1_TIME(FuncParam):
def __init__(self, value):
super(ASN1_TIME, self).__init__(value)
class SSL_CIPHER(FuncParam):
def __init__(self, value):
super(SSL_CIPHER, self).__init__(value)
class GENERAL_NAME_union_d(Union):
_fields_ = [("ptr", c_char_p),
# entries omitted
("directoryName", POINTER(X509_name_st))]
# remaining fields omitted
class STACK(FuncParam):
def __init__(self, value):
super(STACK, self).__init__(value)
class GENERAL_NAME(Structure):
_fields_ = [("type", c_int),
("d", GENERAL_NAME_union_d)]
class GENERAL_NAMES(STACK):
stack_element_type = GENERAL_NAME
def __init__(self, value):
super(GENERAL_NAMES, self).__init__(value)
class STACK_OF_X509(STACK):
stack_element_type = X509
def __init__(self, value):
super(STACK_OF_X509, self).__init__(value)
class X509_NAME_ENTRY(Structure):
_fields_ = [("object", c_void_p),
("value", c_void_p),
("set", c_int),
("size", c_int)]
class ASN1_OCTET_STRING(Structure):
_fields_ = [("length", c_int),
("type", c_int),
("data", POINTER(c_ubyte)),
("flags", c_long)]
class X509_EXTENSION(Structure):
_fields_ = [("object", c_void_p),
("critical", c_int),
("value", POINTER(ASN1_OCTET_STRING))]
class X509V3_EXT_METHOD(Structure):
_fields_ = [("ext_nid", c_int),
("ext_flags", c_int),
("it", c_void_p),
("ext_new", c_int),
("ext_free", c_int),
("d2i", c_int),
("i2d", c_int)] # remaining fields omitted
class TIMEVAL(Structure):
_fields_ = [("tv_sec", c_long),
("tv_usec", c_long)]
class EC_builtin_curve(Structure):
_fields_ = [("nid", c_int),
("comment", c_char_p)]
#
# Socket address conversions
#
class sockaddr_storage(Structure):
    # Generic socket address big enough for any family (128 bytes total:
    # 2-byte family + 126-byte pad)
    _fields_ = [("ss_family", c_short),
                ("pad", c_char * 126)]

class sockaddr_in(Structure):
    # IPv4 socket address; port and address are in network byte order
    _fields_ = [("sin_family", c_short),
                ("sin_port", c_ushort),
                ("sin_addr", c_uint * 1),
                ("sin_zero", c_char * 8)]

class sockaddr_in6(Structure):
    # IPv6 socket address; multi-byte fields are in network byte order
    _fields_ = [("sin6_family", c_short),
                ("sin6_port", c_ushort),
                ("sin6_flowinfo", c_uint),
                ("sin6_addr", c_uint * 4),
                ("sin6_scope_id", c_uint)]

class sockaddr_u(Union):
    # Union of the above, discriminated by ss.ss_family
    _fields_ = [("ss", sockaddr_storage),
                ("s4", sockaddr_in),
                ("s6", sockaddr_in6)]
py_inet_ntop = getattr(socket, "inet_ntop", None)
if not py_inet_ntop:
windll = getattr(ctypes, "windll", None)
if windll:
wsa_inet_ntop = getattr(windll.ws2_32, "inet_ntop", None)
else:
wsa_inet_ntop = None
py_inet_pton = getattr(socket, "inet_pton", None)
if not py_inet_pton:
windll = getattr(ctypes, "windll", None)
if windll:
wsa_inet_pton = getattr(windll.ws2_32, "inet_pton", None)
else:
wsa_inet_pton = None
def inet_ntop(address_family, packed_ip):
    """Convert a packed IP (array of c_uint words) to its string form.

    Tries, in order: the platform's socket.inet_ntop, the Winsock
    inet_ntop (Windows builds lacking Python-level support), and finally
    socket.inet_ntoa (IPv4 only).
    """
    if py_inet_ntop:
        # array.tostring() is the Python 2 spelling of tobytes()
        return py_inet_ntop(address_family,
                            array.array('I', packed_ip).tostring())
    if wsa_inet_ntop:
        # 46 chars covers the longest IPv6 textual form plus NUL
        string_buf = create_string_buffer(47)
        wsa_inet_ntop(address_family, packed_ip,
                      string_buf, sizeof(string_buf))
        if not string_buf.value:
            raise ValueError("wsa_inet_ntop failed with: %s" %
                             array.array('I', packed_ip).tostring())
        return string_buf.value
    if address_family == socket.AF_INET6:
        raise ValueError("Platform does not support IPv6")
    return socket.inet_ntoa(array.array('I', packed_ip).tostring())
def inet_pton(address_family, string_ip):
    """Convert a textual IP address to a packed array of c_uint words.

    Tries, in order: the platform's socket.inet_pton, the Winsock
    inet_pton, and finally socket.inet_aton (IPv4 only).

    Raises ValueError when the conversion fails or when IPv6 is
    requested on a platform without IPv6 support.
    """
    # IPv6 addresses occupy four 32-bit words, IPv4 one
    if address_family == socket.AF_INET6:
        ret_packed_ip = (c_uint * 4)()
    else:
        ret_packed_ip = (c_uint * 1)()
    if py_inet_pton:
        ret_string = py_inet_pton(address_family, string_ip)
        ret_packed_ip[:] = array.array('I', ret_string)
    elif wsa_inet_pton:
        if wsa_inet_pton(address_family, string_ip, ret_packed_ip) != 1:
            raise ValueError("wsa_inet_pton failed with: %s" % string_ip)
    else:
        if address_family == socket.AF_INET6:
            raise ValueError("Platform does not support IPv6")
        ret_string = socket.inet_aton(string_ip)
        ret_packed_ip[:] = array.array('I', ret_string)
    return ret_packed_ip
def addr_tuple_from_sockaddr_u(su):
    """Convert a sockaddr_u union into a Python address tuple.

    Returns a 2-tuple (host, port) for AF_INET and a 4-tuple
    (host, port, flowinfo, scope_id) for AF_INET6, matching the tuple
    formats used by the socket module.
    """
    family = su.ss.ss_family
    if family == socket.AF_INET6:
        v6 = su.s6
        return (inet_ntop(socket.AF_INET6, v6.sin6_addr),
                socket.ntohs(v6.sin6_port),
                socket.ntohl(v6.sin6_flowinfo),
                socket.ntohl(v6.sin6_scope_id))
    assert family == socket.AF_INET
    v4 = su.s4
    return inet_ntop(socket.AF_INET, v4.sin_addr), socket.ntohs(v4.sin_port)
def sockaddr_u_from_addr_tuple(address):
    """Build a sockaddr_u union from a Python address tuple.

    A tuple with more than two elements is treated as an IPv6
    (host, port, flowinfo, scope_id) address; otherwise as an IPv4
    (host, port) address. Multi-byte fields are stored in network
    byte order.
    """
    su = sockaddr_u()
    if len(address) <= 2:
        su.ss.ss_family = socket.AF_INET
        su.s4.sin_addr[:] = inet_pton(socket.AF_INET, address[0])
        su.s4.sin_port = socket.htons(address[1])
        return su
    su.ss.ss_family = socket.AF_INET6
    su.s6.sin6_addr[:] = inet_pton(socket.AF_INET6, address[0])
    su.s6.sin6_port = socket.htons(address[1])
    su.s6.sin6_flowinfo = socket.htonl(address[2])
    su.s6.sin6_scope_id = socket.htonl(address[3])
    return su
#
# Error handling
#
def raise_ssl_error(result, func, args, ssl):
    """Raise an OpenSSL error for a failed library call.

    Determines the SSL error code (when an SSL object is involved) and
    drains the thread's OpenSSL error queue into a list of
    (code, message) pairs, then raises the exception type produced by
    openssl_error().
    """
    if not ssl:
        ssl_error = SSL_ERROR_NONE
    else:
        ssl_error = _SSL_get_error(ssl, result)
    errqueue = []
    # Drain the per-thread OpenSSL error queue completely
    while True:
        err = _ERR_get_error()
        if not err:
            break
        buf = create_string_buffer(512)
        _ERR_error_string_n(err, buf, sizeof(buf))
        errqueue.append((err, buf.value))
    # func.func_name is the Python 2 spelling of func.__name__
    _logger.debug("SSL error raised: ssl_error: %d, result: %d, " +
                  "errqueue: %s, func_name: %s",
                  ssl_error, result, errqueue, func.func_name)
    raise openssl_error()(ssl_error, errqueue, result, func, args)
def find_ssl_arg(args):
    """Return the first SSL instance among args, or None when absent."""
    return next((arg for arg in args if isinstance(arg, SSL)), None)
def errcheck_ord(result, func, args):
    # Error checker for ordinal (int) returns: <= 0 signals failure
    if result <= 0:
        raise_ssl_error(result, func, args, find_ssl_arg(args))
    return args

def errcheck_p(result, func, args):
    # Error checker for pointer/string returns: NULL signals failure
    if not result:
        raise_ssl_error(result, func, args, None)
    return args

def errcheck_FuncParam(result, func, args):
    # Error checker for FuncParam returns: raise on NULL, otherwise wrap
    # the raw pointer in the declared return type (func.ret_type is set
    # up by _make_function)
    if not result:
        raise_ssl_error(result, func, args, None)
    return func.ret_type(result)
#
# Function prototypes
#
def _make_function(name, lib, args, export=True, errcheck="default"):
    """Create a ctypes binding for an OpenSSL function and publish it.

    Arguments:
    name     -- symbol name in the shared library
    lib      -- the CDLL to resolve the symbol from
    args     -- sequence whose first element describes the return type and
                whose remaining elements describe parameters as
                (type, name[, direction[, default]]) tuples
    export   -- when True, publish under its own name and add it to
                __all__; otherwise publish with a leading underscore
    errcheck -- "default" selects an error checker based on the return
                type; pass None or a callable to override
    """
    assert len(args)
    def type_subst(map_type):
        # Map pseudo-types (e.g. c_long_parm) to real ctypes types
        if _subst.has_key(map_type):
            return _subst[map_type]
        return map_type
    sig = tuple(type_subst(i[0]) for i in args)
    # Handle pointer return values (width is architecture-dependent)
    if isinstance(sig[0], type) and issubclass(sig[0], FuncParam):
        sig = (c_void_p,) + sig[1:]
        pointer_return = True
    else:
        pointer_return = False
    # Cache one CFUNCTYPE per distinct signature
    if not _sigs.has_key(sig):
        _sigs[sig] = CFUNCTYPE(*sig)
    if export:
        glbl_name = name
        __all__.append(name)
    else:
        glbl_name = "_" + name
    # Build the paramflags tuple ctypes expects for each parameter:
    # (direction, name) or (direction, name, default)
    func = _sigs[sig]((name, lib), tuple((i[2] if len(i) > 2 else 1,
                                          i[1],
                                          i[3] if len(i) > 3 else None)
                                         [:3 if len(i) > 3 else 2]
                                         for i in args[1:]))
    func.func_name = name
    if pointer_return:
        func.ret_type = args[0][0]  # for fix-up during error checking protocol
    if errcheck == "default":
        # Assign error checker based on return type
        if args[0][0] in (c_int,):
            errcheck = errcheck_ord
        elif args[0][0] in (c_void_p, c_char_p):
            errcheck = errcheck_p
        elif pointer_return:
            errcheck = errcheck_FuncParam
        else:
            errcheck = None
    if errcheck:
        func.errcheck = errcheck
    # Publish the binding as a module-level name
    globals()[glbl_name] = func
_subst = {c_long_parm: c_long}
_sigs = {}
__all__ = [
# Constants
"BIO_NOCLOSE", "BIO_CLOSE",
"SSLEAY_VERSION",
"SSL_OP_NO_QUERY_MTU", "SSL_OP_NO_COMPRESSION",
"SSL_VERIFY_NONE", "SSL_VERIFY_PEER",
"SSL_VERIFY_FAIL_IF_NO_PEER_CERT", "SSL_VERIFY_CLIENT_ONCE",
"SSL_SESS_CACHE_OFF", "SSL_SESS_CACHE_CLIENT",
"SSL_SESS_CACHE_SERVER", "SSL_SESS_CACHE_BOTH",
"SSL_SESS_CACHE_NO_AUTO_CLEAR", "SSL_SESS_CACHE_NO_INTERNAL_LOOKUP",
"SSL_SESS_CACHE_NO_INTERNAL_STORE", "SSL_SESS_CACHE_NO_INTERNAL",
"SSL_ST_MASK", "SSL_ST_CONNECT", "SSL_ST_ACCEPT", "SSL_ST_INIT", "SSL_ST_BEFORE", "SSL_ST_OK",
"SSL_ST_RENEGOTIATE", "SSL_ST_ERR", "SSL_CB_LOOP", "SSL_CB_EXIT", "SSL_CB_READ", "SSL_CB_WRITE",
"SSL_CB_ALERT", "SSL_CB_READ_ALERT", "SSL_CB_WRITE_ALERT",
"SSL_CB_ACCEPT_LOOP", "SSL_CB_ACCEPT_EXIT",
"SSL_CB_CONNECT_LOOP", "SSL_CB_CONNECT_EXIT",
"SSL_CB_HANDSHAKE_START", "SSL_CB_HANDSHAKE_DONE",
"SSL_BUILD_CHAIN_FLAG_NONE", "SSL_BUILD_CHAIN_FLAG_UNTRUSTED", "SSL_BUILD_CHAIN_FLAG_NO_ROOT",
"SSL_BUILD_CHAIN_FLAG_CHECK", "SSL_BUILD_CHAIN_FLAG_IGNORE_ERROR", "SSL_BUILD_CHAIN_FLAG_CLEAR_ERROR",
"SSL_FILE_TYPE_PEM",
"GEN_DIRNAME", "NID_subject_alt_name",
"CRYPTO_LOCK",
# Methods
"CRYPTO_set_locking_callback",
"DTLSv1_get_timeout", "DTLSv1_handle_timeout",
"DTLSv1_listen",
"DTLS_set_link_mtu",
"BIO_gets", "BIO_read", "BIO_get_mem_data",
"BIO_dgram_set_connected",
"BIO_dgram_get_peer", "BIO_dgram_set_peer",
"BIO_set_nbio",
"SSL_CTX_set_session_cache_mode", "SSL_CTX_set_read_ahead",
"SSL_CTX_set_options", "SSL_CTX_clear_options", "SSL_CTX_get_options",
"SSL_CTX_set1_client_sigalgs_list", "SSL_CTX_set1_client_sigalgs",
"SSL_CTX_set1_sigalgs_list", "SSL_CTX_set1_sigalgs",
"SSL_CTX_set1_curves", "SSL_CTX_set1_curves_list",
"SSL_CTX_set_info_callback",
"SSL_CTX_build_cert_chain",
"SSL_CTX_set_ecdh_auto",
"SSL_CTX_set_tmp_ecdh",
"SSL_read", "SSL_write",
"SSL_set_options", "SSL_clear_options", "SSL_get_options",
"SSL_set1_client_sigalgs_list", "SSL_set1_client_sigalgs",
"SSL_set1_sigalgs_list", "SSL_set1_sigalgs",
"SSL_get1_curves", "SSL_get_shared_curve",
"SSL_set1_curves", "SSL_set1_curves_list",
"SSL_set_mtu",
"SSL_state_string_long", "SSL_alert_type_string_long", "SSL_alert_desc_string_long",
"SSL_get_peer_cert_chain",
"SSL_CTX_set_cookie_cb",
"OBJ_obj2txt", "decode_ASN1_STRING", "ASN1_TIME_print",
"OBJ_nid2sn",
"X509_get_notAfter",
"ASN1_item_d2i", "GENERAL_NAME_print",
"sk_value",
"sk_pop_free",
"i2d_X509",
"get_elliptic_curves",
] # note: the following map adds to this list
map(lambda x: _make_function(*x), (
("SSL_library_init", libssl,
((c_int, "ret"),)),
("SSL_load_error_strings", libssl,
((None, "ret"),)),
("SSLeay", libcrypto,
((c_long_parm, "ret"),)),
("SSLeay_version", libcrypto,
((c_char_p, "ret"), (c_int, "t"))),
("CRYPTO_set_locking_callback", libcrypto,
((None, "ret"), (c_void_p, "func")), False),
("CRYPTO_get_id_callback", libcrypto,
((c_void_p, "ret"),), True, None),
("CRYPTO_num_locks", libcrypto,
((c_int, "ret"),)),
("DTLS_server_method", libssl,
((DTLS_Method, "ret"),)),
("DTLSv1_server_method", libssl,
((DTLS_Method, "ret"),)),
("DTLSv1_2_server_method", libssl,
((DTLS_Method, "ret"),)),
("DTLSv1_client_method", libssl,
((DTLS_Method, "ret"),)),
("DTLSv1_2_client_method", libssl,
((DTLS_Method, "ret"),)),
("SSL_CTX_new", libssl,
((SSLCTX, "ret"), (DTLS_Method, "meth"))),
("SSL_CTX_free", libssl,
((None, "ret"), (SSLCTX, "ctx"))),
("SSL_CTX_set_cookie_generate_cb", libssl,
((None, "ret"), (SSLCTX, "ctx"), (c_void_p, "app_gen_cookie_cb")), False),
("SSL_CTX_set_cookie_verify_cb", libssl,
((None, "ret"), (SSLCTX, "ctx"), (c_void_p, "app_verify_cookie_cb")), False),
("SSL_CTX_set_info_callback", libssl,
((None, "ret"), (SSLCTX, "ctx"), (c_void_p, "app_info_cb")), False),
("SSL_new", libssl,
((SSL, "ret"), (SSLCTX, "ctx"))),
("SSL_free", libssl,
((None, "ret"), (SSL, "ssl"))),
("SSL_set_bio", libssl,
((None, "ret"), (SSL, "ssl"), (BIO, "rbio"), (BIO, "wbio"))),
("BIO_new", libcrypto,
((BIO, "ret"), (BIO_METHOD, "type"))),
("BIO_s_mem", libcrypto,
((BIO_METHOD, "ret"),)),
("BIO_new_file", libcrypto,
((BIO, "ret"), (c_char_p, "filename"), (c_char_p, "mode"))),
("BIO_new_dgram", libcrypto,
((BIO, "ret"), (c_int, "fd"), (c_int, "close_flag"))),
("BIO_free", libcrypto,
((c_int, "ret"), (BIO, "a"))),
("BIO_gets", libcrypto,
((c_int, "ret"), (BIO, "b"), (POINTER(c_char), "buf"), (c_int, "size")), False),
("BIO_read", libcrypto,
((c_int, "ret"), (BIO, "b"), (c_void_p, "buf"), (c_int, "len")), False),
("SSL_CTX_ctrl", libssl,
((c_long_parm, "ret"), (SSLCTX, "ctx"), (c_int, "cmd"), (c_long, "larg"), (c_void_p, "parg")), False),
("BIO_ctrl", libcrypto,
((c_long_parm, "ret"), (BIO, "bp"), (c_int, "cmd"), (c_long, "larg"), (c_void_p, "parg")), False),
("SSL_ctrl", libssl,
((c_long_parm, "ret"), (SSL, "ssl"), (c_int, "cmd"), (c_long, "larg"), (c_void_p, "parg")), False),
("ERR_get_error", libcrypto,
((c_long_parm, "ret"),), False),
("ERR_error_string_n", libcrypto,
((None, "ret"), (c_ulong, "e"), (c_char_p, "buf"), (c_size_t, "len")), False),
("SSL_get_error", libssl,
((c_int, "ret"), (SSL, "ssl"), (c_int, "ret")), False, None),
("SSL_state_string_long", libssl,
((c_char_p, "ret"), (SSL, "ssl")), False),
("SSL_alert_type_string_long", libssl,
((c_char_p, "ret"), (c_int, "value")), False),
("SSL_alert_desc_string_long", libssl,
((c_char_p, "ret"), (c_int, "value")), False),
("SSL_CTX_set_cipher_list", libssl,
((c_int, "ret"), (SSLCTX, "ctx"), (c_char_p, "str"))),
("SSL_CTX_use_certificate_file", libssl,
((c_int, "ret"), (SSLCTX, "ctx"), (c_char_p, "file"), (c_int, "type"))),
("SSL_CTX_use_certificate_chain_file", libssl,
((c_int, "ret"), (SSLCTX, "ctx"), (c_char_p, "file"))),
("SSL_CTX_use_PrivateKey_file", libssl,
((c_int, "ret"), (SSLCTX, "ctx"), (c_char_p, "file"), (c_int, "type"))),
("SSL_CTX_load_verify_locations", libssl,
((c_int, "ret"), (SSLCTX, "ctx"), (c_char_p, "CAfile"), (c_char_p, "CApath"))),
("SSL_CTX_set_verify", libssl,
((None, "ret"), (SSLCTX, "ctx"), (c_int, "mode"), (c_void_p, "verify_callback", 1, None))),
("SSL_accept", libssl,
((c_int, "ret"), (SSL, "ssl"))),
("SSL_connect", libssl,
((c_int, "ret"), (SSL, "ssl"))),
("SSL_set_connect_state", libssl,
((None, "ret"), (SSL, "ssl"))),
("SSL_set_accept_state", libssl,
((None, "ret"), (SSL, "ssl"))),
("SSL_do_handshake", libssl,
((c_int, "ret"), (SSL, "ssl"))),
("SSL_get_peer_certificate", libssl,
((X509, "ret"), (SSL, "ssl"))),
("SSL_get_peer_cert_chain", libssl,
((STACK_OF_X509, "ret"), (SSL, "ssl")), False),
("SSL_read", libssl,
((c_int, "ret"), (SSL, "ssl"), (c_void_p, "buf"), (c_int, "num")), False),
("SSL_write", libssl,
((c_int, "ret"), (SSL, "ssl"), (c_void_p, "buf"), (c_int, "num")), False),
("SSL_pending", libssl,
((c_int, "ret"), (SSL, "ssl")), True, None),
("SSL_shutdown", libssl,
((c_int, "ret"), (SSL, "ssl"))),
("SSL_set_read_ahead", libssl,
((None, "ret"), (SSL, "ssl"), (c_int, "yes"))),
("X509_free", libcrypto,
((None, "ret"), (X509, "a"))),
("PEM_read_bio_X509_AUX", libcrypto,
((X509, "ret"), (BIO, "bp"), (c_void_p, "x", 1, None), (c_void_p, "cb", 1, None), (c_void_p, "u", 1, None))),
("OBJ_obj2txt", libcrypto,
((c_int, "ret"), (POINTER(c_char), "buf"), (c_int, "buf_len"), (ASN1_OBJECT, "a"), (c_int, "no_name")), False),
("OBJ_nid2sn", libcrypto,
((c_char_p, "ret"), (c_int, "n")), False),
("CRYPTO_free", libcrypto,
((None, "ret"), (c_void_p, "ptr"))),
("ASN1_STRING_to_UTF8", libcrypto,
((c_int, "ret"), (POINTER(POINTER(c_ubyte)), "out"), (ASN1_STRING, "in")), False),
("X509_NAME_entry_count", libcrypto,
((c_int, "ret"), (POINTER(X509_name_st), "name")), True, None),
("X509_NAME_get_entry", libcrypto,
((POINTER(X509_NAME_ENTRY), "ret"), (POINTER(X509_name_st), "name"),
(c_int, "loc")), True, errcheck_p),
("X509_NAME_ENTRY_get_object", libcrypto,
((ASN1_OBJECT, "ret"), (POINTER(X509_NAME_ENTRY), "ne"))),
("X509_NAME_ENTRY_get_data", libcrypto,
((ASN1_STRING, "ret"), (POINTER(X509_NAME_ENTRY), "ne"))),
("X509_get_subject_name", libcrypto,
((POINTER(X509_name_st), "ret"), (X509, "a")), True, errcheck_p),
("ASN1_TIME_print", libcrypto,
((c_int, "ret"), (BIO, "fp"), (ASN1_TIME, "a")), False),
("X509_get_ext_by_NID", libcrypto,
((c_int, "ret"), (X509, "x"), (c_int, "nid"), (c_int, "lastpos")), True, None),
("X509_get_ext", libcrypto,
((POINTER(X509_EXTENSION), "ret"), (X509, "x"), (c_int, "loc")), True, errcheck_p),
("X509V3_EXT_get", libcrypto,
((POINTER(X509V3_EXT_METHOD), "ret"), (POINTER(X509_EXTENSION), "ext")), True, errcheck_p),
("ASN1_item_d2i", libcrypto,
((c_void_p, "ret"), (c_void_p, "val"), (POINTER(POINTER(c_ubyte)), "in"), (c_long, "len"), (c_void_p, "it")), False, None),
("sk_num", libcrypto,
((c_int, "ret"), (STACK, "stack")), True, None),
("sk_value", libcrypto,
((c_void_p, "ret"), (STACK, "stack"), (c_int, "loc")), False),
("GENERAL_NAME_print", libcrypto,
((c_int, "ret"), (BIO, "out"), (POINTER(GENERAL_NAME), "gen")), False),
("sk_pop_free", libcrypto,
((None, "ret"), (STACK, "st"), (c_void_p, "func")), False),
("i2d_X509_bio", libcrypto,
((c_int, "ret"), (BIO, "bp"), (X509, "x")), False),
("SSL_get_current_cipher", libssl,
((SSL_CIPHER, "ret"), (SSL, "ssl"))),
("SSL_CIPHER_get_name", libssl,
((c_char_p, "ret"), (SSL_CIPHER, "cipher"))),
("SSL_CIPHER_get_version", libssl,
((c_char_p, "ret"), (SSL_CIPHER, "cipher"))),
("SSL_CIPHER_get_bits", libssl,
((c_int, "ret"), (SSL_CIPHER, "cipher"), (POINTER(c_int), "alg_bits", 1, None)), True, None),
("EC_get_builtin_curves", libcrypto,
((c_int, "ret"), (POINTER(EC_builtin_curve), "r"), (c_int, "nitems"))),
("EC_KEY_new_by_curve_name", libcrypto,
((EC_KEY, "ret"), (c_int, "nid"))),
("EC_KEY_free", libcrypto,
((None, "ret"), (EC_KEY, "key"))),
("EC_curve_nist2nid", libcrypto,
((c_int, "ret"), (POINTER(c_char), "name")), True, None),
("EC_curve_nid2nist", libcrypto,
((c_char_p, "ret"), (c_int, "nid")), True, None),
))
#
# Wrappers - functions generally equivalent to OpenSSL library macros
#
# Callback signature: void (*)(int mode, int n, const char *file, int line)
_rvoid_int_int_charp_int = CFUNCTYPE(None, c_int, c_int, c_char_p, c_int)

def CRYPTO_set_locking_callback(locking_function):
    """Install the OpenSSL thread-locking callback.

    The ctypes callback object is kept in a module global so it is not
    garbage collected while the library still references it.
    """
    def py_locking_function(mode, n, file, line):
        try:
            locking_function(mode, n, file, line)
        except:
            _logger.exception("Thread locking failed")
    global _locking_cb  # for keep-alive
    _locking_cb = _rvoid_int_int_charp_int(py_locking_function)
    _CRYPTO_set_locking_callback(_locking_cb)
def SSL_CTX_set_session_cache_mode(ctx, mode):
# Returns the previous value of mode
return _SSL_CTX_ctrl(ctx, SSL_CTRL_SET_SESS_CACHE_MODE, mode, None)
def SSL_CTX_set_read_ahead(ctx, m):
# Returns the previous value of m
return _SSL_CTX_ctrl(ctx, SSL_CTRL_SET_READ_AHEAD, m, None)
def SSL_CTX_set_options(ctx, options):
# Returns the new option bitmaks after adding the given options
return _SSL_CTX_ctrl(ctx, SSL_CTRL_OPTIONS, options, None)
def SSL_CTX_clear_options(ctx, options):
return _SSL_CTX_ctrl(ctx, SSL_CTRL_CLEAR_OPTIONS, options, None)
def SSL_CTX_get_options(ctx):
return _SSL_CTX_ctrl(ctx, SSL_CTRL_OPTIONS, 0, None)
def SSL_CTX_set1_client_sigalgs(ctx, slist, slistlen):
_slist = (c_int * len(slist))(*slist)
return _SSL_CTX_ctrl(ctx, SSL_CTRL_SET_CLIENT_SIGALGS, len(_slist), _slist)
def SSL_CTX_set1_client_sigalgs_list(ctx, s):
_s = cast(s, POINTER(c_char))
return _SSL_CTX_ctrl(ctx, SSL_CTRL_SET_CLIENT_SIGALGS_LIST, 0, _s)
def SSL_CTX_set1_sigalgs(ctx, slist, slistlen):
_slist = (c_int * len(slist))(*slist)
return _SSL_CTX_ctrl(ctx, SSL_CTRL_SET_SIGALGS, len(_slist), _slist)
def SSL_CTX_set1_sigalgs_list(ctx, s):
_s = cast(s, POINTER(c_char))
return _SSL_CTX_ctrl(ctx, SSL_CTRL_SET_SIGALGS_LIST, 0, _s)
def SSL_CTX_set1_curves(ctx, clist, clistlen):
_curves = (c_int * len(clist))(*clist)
return _SSL_CTX_ctrl(ctx, SSL_CTRL_SET_CURVES, len(_curves), _curves)
def SSL_CTX_set1_curves_list(ctx, s):
_s = cast(s, POINTER(c_char))
return _SSL_CTX_ctrl(ctx, SSL_CTRL_SET_CURVES_LIST, 0, _s)
_rvoid_voidp_int_int = CFUNCTYPE(None, c_void_p, c_int, c_int)
_info_callback = dict()
def SSL_CTX_build_cert_chain(ctx, flags):
return _SSL_CTX_ctrl(ctx, SSL_CTRL_BUILD_CERT_CHAIN, flags, None)
def SSL_CTX_set_ecdh_auto(ctx, onoff):
return _SSL_CTX_ctrl(ctx, SSL_CTRL_SET_ECDH_AUTO, onoff, None)
def SSL_CTX_set_tmp_ecdh(ctx, ec_key):
# return 1 on success and 0 on failure
_ec_key_p = cast(ec_key.raw, c_void_p)
return _SSL_CTX_ctrl(ctx, SSL_CTRL_SET_TMP_ECDH, 0, _ec_key_p)
_rint_voidp_ubytep_uintp = CFUNCTYPE(c_int, c_void_p, POINTER(c_ubyte),
POINTER(c_uint))
_rint_voidp_ubytep_uint = CFUNCTYPE(c_int, c_void_p, POINTER(c_ubyte), c_uint)
def SSL_CTX_set_cookie_cb(ctx, generate, verify):
    """Install DTLS cookie generation/verification callbacks on a context.

    Arguments:
    generate -- callable taking an SSL wrapper and returning cookie bytes
    verify   -- callable taking an SSL wrapper and the received cookie
                string; it should raise to signal verification failure

    Returns the two ctypes callback objects; the caller must keep a
    reference to them for as long as the context lives, or they will be
    garbage collected while OpenSSL still points at them.
    """
    def py_generate_cookie_cb(ssl, cookie, cookie_len):
        try:
            ret_cookie = generate(SSL(ssl))
        except:
            # Returning 0 tells OpenSSL cookie generation failed
            _logger.exception("Cookie generation failed")
            return 0
        cookie_len[0] = len(ret_cookie)
        memmove(cookie, ret_cookie, cookie_len[0])
        _logger.debug("Returning cookie: %s", cookie[:cookie_len[0]])
        return 1
    def py_verify_cookie_cb(ssl, cookie, cookie_len):
        _logger.debug("Verifying cookie: %s", cookie[:cookie_len])
        try:
            # Rebuild the cookie as a str from the raw ubyte buffer
            verify(SSL(ssl), ''.join([chr(i) for i in cookie[:cookie_len]]))
        except:
            _logger.debug("Cookie verification failed")
            return 0
        return 1
    gen_cb = _rint_voidp_ubytep_uintp(py_generate_cookie_cb)
    ver_cb = _rint_voidp_ubytep_uint(py_verify_cookie_cb)
    _SSL_CTX_set_cookie_generate_cb(ctx, gen_cb)
    _SSL_CTX_set_cookie_verify_cb(ctx, ver_cb)
    return gen_cb, ver_cb
def BIO_dgram_set_connected(bio, peer_address):
su = sockaddr_u_from_addr_tuple(peer_address)
return _BIO_ctrl(bio, BIO_CTRL_DGRAM_SET_CONNECTED, 0, byref(su))
def BIO_dgram_get_peer(bio):
su = sockaddr_u()
_BIO_ctrl(bio, BIO_CTRL_DGRAM_GET_PEER, 0, byref(su))
return addr_tuple_from_sockaddr_u(su)
def BIO_dgram_set_peer(bio, peer_address):
su = sockaddr_u_from_addr_tuple(peer_address)
return _BIO_ctrl(bio, BIO_CTRL_DGRAM_SET_PEER, 0, byref(su))
def BIO_set_nbio(bio, n):
return _BIO_ctrl(bio, BIO_C_SET_NBIO, 1 if n else 0, None)
def DTLSv1_get_timeout(ssl):
    """Return the remaining DTLS retransmission timeout as a timedelta.

    Returns None when no retransmission timer is currently pending
    (the underlying ctrl call does not return 1).
    """
    remaining = TIMEVAL()
    if _SSL_ctrl(ssl, DTLS_CTRL_GET_TIMEOUT, 0, byref(remaining)) != 1:
        return None
    return timedelta(seconds=remaining.tv_sec,
                     microseconds=remaining.tv_usec)
def DTLSv1_handle_timeout(ssl):
    """Handle an expired DTLS retransmission timer.

    Returns False when no timer had expired yet, True when buffered
    messages were retransmitted; raises via the error-checking protocol
    on library failure.
    """
    ret = _SSL_ctrl(ssl, DTLS_CTRL_HANDLE_TIMEOUT, 0, None)
    if ret == 0:
        # It was too early to call: no timer had yet expired
        return False
    if ret == 1:
        # Buffered messages were retransmitted
        return True
    # There was an error: either too many timeouts have occurred or a
    # retransmission failed
    assert ret < 0
    # NOTE(review): this branch is unreachable while the assert above
    # holds; it only takes effect when asserts are stripped (python -O),
    # forcing any stray positive return onto the error path — confirm
    # whether that was the intent
    if ret > 0:
        ret = -10
    return errcheck_p(ret, _SSL_ctrl, (ssl, DTLS_CTRL_HANDLE_TIMEOUT, 0, None))
def DTLSv1_listen(ssl):
su = sockaddr_u()
ret = _SSL_ctrl(ssl, DTLS_CTRL_LISTEN, 0, byref(su))
errcheck_ord(ret, _SSL_ctrl, (ssl, DTLS_CTRL_LISTEN, 0, byref(su)))
return addr_tuple_from_sockaddr_u(su)
def DTLS_set_link_mtu(ssl, mtu):
return _SSL_ctrl(ssl, DTLS_CTRL_SET_LINK_MTU, mtu, None)
def SSL_read(ssl, length, buffer):
if buffer:
length = min(length, len(buffer))
buf = (c_char * length).from_buffer(buffer)
else:
buf = create_string_buffer(length)
res_len = _SSL_read(ssl, buf, length)
if buffer:
return res_len
return buf.raw[:res_len]
def SSL_write(ssl, data):
if isinstance(data, str):
str_data = data
elif hasattr(data, "tobytes") and callable(data.tobytes):
str_data = data.tobytes()
elif isinstance(data, ctypes.Array):
str_data = data.raw
else:
str_data = str(data)
return _SSL_write(ssl, str_data, len(str_data))
def SSL_set_options(ssl, op):
return _SSL_ctrl(ssl, SSL_CTRL_OPTIONS, op, None)
def SSL_clear_options(ssl, op):
return _SSL_ctrl(ssl, SSL_CTRL_CLEAR_OPTIONS, op, None)
def SSL_get_options(ssl):
return _SSL_ctrl(ssl, SSL_CTRL_OPTIONS, 0, None)
def SSL_set1_client_sigalgs(ssl, slist, slistlen):
_slist = (c_int * len(slist))(*slist)
return _SSL_ctrl(ssl, SSL_CTRL_SET_CLIENT_SIGALGS, len(_slist), _slist)
def SSL_set1_client_sigalgs_list(ssl, s):
_s = cast(s, POINTER(c_char))
return _SSL_ctrl(ssl, SSL_CTRL_SET_CLIENT_SIGALGS_LIST, 0, _s)
def SSL_set1_sigalgs(ssl, slist, slistlen):
_slist = (c_int * len(slist))(*slist)
return _SSL_ctrl(ssl, SSL_CTRL_SET_SIGALGS, len(_slist), _slist)
def SSL_set1_sigalgs_list(ssl, s):
_s = cast(s, POINTER(c_char))
return _SSL_ctrl(ssl, SSL_CTRL_SET_SIGALGS_LIST, 0, _s)
def SSL_get1_curves(ssl, curves=None):
assert curves is None or isinstance(curves, list)
if curves is not None:
cnt = SSL_get1_curves(ssl, None)
if cnt:
mem = create_string_buffer(sizeof(POINTER(c_int)) * cnt)
_SSL_ctrl(ssl, SSL_CTRL_GET_CURVES, 0, mem)
for x in cast(mem, POINTER(c_int))[:cnt]:
curves.append(x)
return cnt
else:
return _SSL_ctrl(ssl, SSL_CTRL_GET_CURVES, 0, None)
def SSL_get_shared_curve(ssl, n):
return _SSL_ctrl(ssl, SSL_CTRL_GET_SHARED_CURVE, n, 0)
def SSL_set1_curves(ssl, clist, clistlen):
_curves = (c_int * len(clist))(*clist)
return _SSL_ctrl(ssl, SSL_CTRL_SET_CURVES, len(_curves), _curves)
def SSL_set1_curves_list(ssl, s):
_s = cast(s, POINTER(c_char))
return _SSL_ctrl(ssl, SSL_CTRL_SET_CURVES_LIST, 0, _s)
def SSL_set_mtu(ssl, mtu):
return _SSL_ctrl(ssl, SSL_CTRL_SET_MTU, mtu, None)
def SSL_state_string_long(ssl):
try:
ret = _SSL_state_string_long(ssl)
except:
pass
return ret
def SSL_alert_type_string_long(value):
try:
ret = _SSL_alert_type_string_long(value)
except:
pass
return ret
def SSL_alert_desc_string_long(value):
try:
ret = _SSL_alert_desc_string_long(value)
except:
pass
return ret
def OBJ_obj2txt(asn1_object, no_name):
buf = create_string_buffer(X509_NAME_MAXLEN)
res_len = _OBJ_obj2txt(buf, sizeof(buf), asn1_object, 1 if no_name else 0)
return buf.raw[:res_len]
def OBJ_nid2sn(nid):
_name = _OBJ_nid2sn(nid)
return cast(_name, c_char_p).value.decode("ascii")
def decode_ASN1_STRING(asn1_string):
utf8_buf_ptr = POINTER(c_ubyte)()
res_len = _ASN1_STRING_to_UTF8(byref(utf8_buf_ptr), asn1_string)
try:
return unicode(''.join([chr(i) for i in utf8_buf_ptr[:res_len]]),
'utf-8')
finally:
CRYPTO_free(utf8_buf_ptr)
def X509_get_notAfter(x509):
x509_raw = X509.from_param(x509)
x509_ptr = cast(x509_raw, POINTER(X509_st))
notAfter = x509_ptr.contents.cert_info.contents.validity.contents.notAfter
return ASN1_TIME(notAfter)
def BIO_gets(bio):
buf = create_string_buffer(GETS_MAXLEN)
res_len = _BIO_gets(bio, buf, sizeof(buf) - 1)
return buf.raw[:res_len]
def BIO_read(bio, length):
buf = create_string_buffer(length)
res_len = _BIO_read(bio, buf, sizeof(buf))
return buf.raw[:res_len]
def BIO_get_mem_data(bio):
buf = POINTER(c_ubyte)()
res_len = _BIO_ctrl(bio, BIO_CTRL_INFO, 0, byref(buf))
return ''.join([chr(i) for i in buf[:res_len]])
def ASN1_TIME_print(asn1_time):
bio = _BIO(BIO_new(BIO_s_mem()))
_ASN1_TIME_print(bio.value, asn1_time)
return BIO_gets(bio.value)
_rvoidp = CFUNCTYPE(c_void_p)
def _ASN1_ITEM_ptr(item):
if sys.platform.startswith('win'):
func_ptr = _rvoidp(item)
return func_ptr()
return item
_rvoidp_voidp_ubytepp_long = CFUNCTYPE(c_void_p, c_void_p,
POINTER(POINTER(c_ubyte)), c_long)
def ASN1_item_d2i(method, asn1_octet_string):
data_in = POINTER(c_ubyte)(asn1_octet_string.data.contents)
if method.it:
return GENERAL_NAMES(_ASN1_item_d2i(None, byref(data_in),
asn1_octet_string.length,
_ASN1_ITEM_ptr(method.it)))
func_ptr = _rvoidp_voidp_ubytepp_long(method.d2i)
return GENERAL_NAMES(func_ptr(None, byref(data_in),
asn1_octet_string.length))
def sk_value(stack, loc):
return cast(_sk_value(stack, loc), POINTER(stack.stack_element_type))
def GENERAL_NAME_print(general_name):
bio = _BIO(BIO_new(BIO_s_mem()))
_GENERAL_NAME_print(bio.value, general_name)
return BIO_gets(bio.value)
_free_func = addressof(c_void_p.in_dll(libcrypto, "sk_free"))
def sk_pop_free(stack):
_sk_pop_free(stack, _free_func)
def i2d_X509(x509):
bio = _BIO(BIO_new(BIO_s_mem()))
_i2d_X509_bio(bio.value, x509)
return BIO_get_mem_data(bio.value)
def SSL_get_peer_cert_chain(ssl):
stack = _SSL_get_peer_cert_chain(ssl)
num = sk_num(stack)
certs = []
if num:
# why not use sk_value(): because it doesn't cast correct in this case?!
# certs = [(sk_value(stack, i)) for i in xrange(num)]
certs = [X509(_sk_value(stack, i)) for i in xrange(num)]
return stack, num, certs
|
rbit/pydtls | dtls/err.py | raise_ssl_error | python | def raise_ssl_error(code, nested=None):
err_string = str(code) + ": " + _ssl_errors[code]
if nested:
raise SSLError(code, err_string + str(nested))
raise SSLError(code, err_string) | Raise an SSL error with the given error code | train | https://github.com/rbit/pydtls/blob/41a71fccd990347d0de5f42418fea1e4e733359c/dtls/err.py#L108-L113 | null | # DTLS exceptions.
# Copyright 2012 Ray Brown
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The License is also distributed with this work in the file named "LICENSE."
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DTLS Errors
This module defines error functionality and exception types for the dtls
package.
Classes:
SSLError -- exception raised for I/O errors
InvalidSocketError -- exception raised for improper socket objects
"""
from socket import error as socket_error
SSL_ERROR_NONE = 0
SSL_ERROR_SSL = 1
SSL_ERROR_WANT_READ = 2
SSL_ERROR_WANT_WRITE = 3
SSL_ERROR_WANT_X509_LOOKUP = 4
SSL_ERROR_SYSCALL = 5
SSL_ERROR_ZERO_RETURN = 6
SSL_ERROR_WANT_CONNECT = 7
SSL_ERROR_WANT_ACCEPT = 8
ERR_BOTH_KEY_CERT_FILES = 500
ERR_BOTH_KEY_CERT_FILES_SVR = 298
ERR_NO_CERTS = 331
ERR_NO_CIPHER = 501
ERR_READ_TIMEOUT = 502
ERR_WRITE_TIMEOUT = 503
ERR_HANDSHAKE_TIMEOUT = 504
ERR_PORT_UNREACHABLE = 505
ERR_WRONG_SSL_VERSION = 0x1409210A
ERR_WRONG_VERSION_NUMBER = 0x1408A10B
ERR_COOKIE_MISMATCH = 0x1408A134
ERR_CERTIFICATE_VERIFY_FAILED = 0x14090086
ERR_NO_SHARED_CIPHER = 0x1408A0C1
ERR_SSL_HANDSHAKE_FAILURE = 0x1410C0E5
ERR_TLSV1_ALERT_UNKNOWN_CA = 0x14102418
def patch_ssl_errors():
import ssl
errors = [i for i in globals().iteritems() if type(i[1]) == int and str(i[0]).startswith('ERR_')]
for k, v in errors:
if not hasattr(ssl, k):
setattr(ssl, k, v)
class SSLError(socket_error):
"""This exception is raised by modules in the dtls package."""
def __init__(self, *args):
super(SSLError, self).__init__(*args)
class InvalidSocketError(Exception):
"""There is a problem with a socket passed to the dtls package."""
def __init__(self, *args):
super(InvalidSocketError, self).__init__(*args)
def _make_opensslerror_class():
global _OpenSSLError
class __OpenSSLError(SSLError):
"""
This exception is raised when an error occurs in the OpenSSL library
"""
def __init__(self, ssl_error, errqueue, result, func, args):
self.ssl_error = ssl_error
self.errqueue = errqueue
self.result = result
self.func = func
self.args = args
SSLError.__init__(self, ssl_error, errqueue,
result, func, args)
_OpenSSLError = __OpenSSLError
_make_opensslerror_class()
def openssl_error():
"""Return the OpenSSL error type for use in exception clauses"""
return _OpenSSLError
def raise_as_ssl_module_error():
"""Exceptions raised from this module are instances of ssl.SSLError"""
import ssl
global SSLError
SSLError = ssl.SSLError
_make_opensslerror_class()
_ssl_errors = {
ERR_NO_CERTS: "No root certificates specified for verification " + \
"of other-side certificates",
ERR_BOTH_KEY_CERT_FILES: "Both the key & certificate files " + \
"must be specified",
ERR_BOTH_KEY_CERT_FILES_SVR: "Both the key & certificate files must be " + \
"specified for server-side operation",
ERR_NO_CIPHER: "No cipher can be selected.",
ERR_READ_TIMEOUT: "The read operation timed out",
ERR_WRITE_TIMEOUT: "The write operation timed out",
ERR_HANDSHAKE_TIMEOUT: "The handshake operation timed out",
ERR_PORT_UNREACHABLE: "The peer address is not reachable",
}
|
rbit/pydtls | dtls/demux/router.py | UDPDemux.get_connection | python | def get_connection(self, address):
if self.connections.has_key(address):
return self.connections[address]
# We need a new datagram socket on a dynamically assigned ephemeral port
conn = socket.socket(self._forwarding_socket.family,
self._forwarding_socket.type,
self._forwarding_socket.proto)
conn.bind((self._forwarding_socket.getsockname()[0], 0))
conn.connect(self._forwarding_socket.getsockname())
if not address:
conn.setblocking(0)
self.connections[address] = conn
_logger.debug("Created new connection for address: %s", address)
return conn | Create or retrieve a muxed connection
Arguments:
address -- a peer endpoint in IPv4/v6 address format; None refers
to the connection for unknown peers
Return:
a bound, connected datagram socket instance | train | https://github.com/rbit/pydtls/blob/41a71fccd990347d0de5f42418fea1e4e733359c/dtls/demux/router.py#L94-L118 | null | class UDPDemux(object):
"""Explicitly routing UDP demux
This class implements a demux that forwards packets from the root
socket to sockets belonging to connections. It does this whenever its
service method is invoked.
Methods:
remove_connection -- remove an existing connection
service -- distribute datagrams from the root socket to connections
forward -- forward a stored datagram to a connection
"""
_forwarding_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
_forwarding_socket.bind(('127.0.0.1', 0))
def __init__(self, datagram_socket):
"""Constructor
Arguments:
datagram_socket -- the root socket; this must be a bound, unconnected
datagram socket
"""
if datagram_socket.type != socket.SOCK_DGRAM:
raise InvalidSocketError("datagram_socket is not of " +
"type SOCK_DGRAM")
try:
datagram_socket.getsockname()
except:
raise InvalidSocketError("datagram_socket is unbound")
try:
datagram_socket.getpeername()
except:
pass
else:
raise InvalidSocketError("datagram_socket is connected")
self.datagram_socket = datagram_socket
self.payload = ""
self.payload_peer_address = None
self.connections = WeakValueDictionary()
def get_connection(self, address):
"""Create or retrieve a muxed connection
Arguments:
address -- a peer endpoint in IPv4/v6 address format; None refers
to the connection for unknown peers
Return:
a bound, connected datagram socket instance
"""
if self.connections.has_key(address):
return self.connections[address]
# We need a new datagram socket on a dynamically assigned ephemeral port
conn = socket.socket(self._forwarding_socket.family,
self._forwarding_socket.type,
self._forwarding_socket.proto)
conn.bind((self._forwarding_socket.getsockname()[0], 0))
conn.connect(self._forwarding_socket.getsockname())
if not address:
conn.setblocking(0)
self.connections[address] = conn
_logger.debug("Created new connection for address: %s", address)
return conn
def remove_connection(self, address):
"""Remove a muxed connection
Arguments:
address -- an address that was previously returned by the service
method and whose connection has not yet been removed
Return:
the socket object whose connection has been removed
"""
return self.connections.pop(address)
def service(self):
"""Service the root socket
Read from the root socket and forward one datagram to a
connection. The call will return without forwarding data
if any of the following occurs:
* An error is encountered while reading from the root socket
* Reading from the root socket times out
* The root socket is non-blocking and has no data available
* An empty payload is received
* A non-empty payload is received from an unknown peer (a peer
for which get_connection has not yet been called); in this case,
the payload is held by this instance and will be forwarded when
the forward method is called
Return:
if the datagram received was from a new peer, then the peer's
address; otherwise None
"""
self.payload, self.payload_peer_address = \
self.datagram_socket.recvfrom(UDP_MAX_DGRAM_LENGTH)
_logger.debug("Received datagram from peer: %s",
self.payload_peer_address)
if not self.payload:
self.payload_peer_address = None
return
if self.connections.has_key(self.payload_peer_address):
self.forward()
else:
return self.payload_peer_address
def forward(self):
"""Forward a stored datagram
When the service method returns the address of a new peer, it holds
the datagram from that peer in this instance. In this case, this
method will perform the forwarding step. The target connection is the
one associated with address None if get_connection has not been called
since the service method returned the new peer's address, and the
connection associated with the new peer's address if it has.
"""
assert self.payload
assert self.payload_peer_address
if self.connections.has_key(self.payload_peer_address):
conn = self.connections[self.payload_peer_address]
default = False
else:
conn = self.connections[None] # propagate exception if not created
default = True
_logger.debug("Forwarding datagram from peer: %s, default: %s",
self.payload_peer_address, default)
self._forwarding_socket.sendto(self.payload, conn.getsockname())
self.payload = ""
self.payload_peer_address = None
|
rbit/pydtls | dtls/demux/router.py | UDPDemux.service | python | def service(self):
self.payload, self.payload_peer_address = \
self.datagram_socket.recvfrom(UDP_MAX_DGRAM_LENGTH)
_logger.debug("Received datagram from peer: %s",
self.payload_peer_address)
if not self.payload:
self.payload_peer_address = None
return
if self.connections.has_key(self.payload_peer_address):
self.forward()
else:
return self.payload_peer_address | Service the root socket
Read from the root socket and forward one datagram to a
connection. The call will return without forwarding data
if any of the following occurs:
* An error is encountered while reading from the root socket
* Reading from the root socket times out
* The root socket is non-blocking and has no data available
* An empty payload is received
* A non-empty payload is received from an unknown peer (a peer
for which get_connection has not yet been called); in this case,
the payload is held by this instance and will be forwarded when
the forward method is called
Return:
if the datagram received was from a new peer, then the peer's
address; otherwise None | train | https://github.com/rbit/pydtls/blob/41a71fccd990347d0de5f42418fea1e4e733359c/dtls/demux/router.py#L133-L164 | null | class UDPDemux(object):
"""Explicitly routing UDP demux
This class implements a demux that forwards packets from the root
socket to sockets belonging to connections. It does this whenever its
service method is invoked.
Methods:
remove_connection -- remove an existing connection
service -- distribute datagrams from the root socket to connections
forward -- forward a stored datagram to a connection
"""
_forwarding_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
_forwarding_socket.bind(('127.0.0.1', 0))
def __init__(self, datagram_socket):
"""Constructor
Arguments:
datagram_socket -- the root socket; this must be a bound, unconnected
datagram socket
"""
if datagram_socket.type != socket.SOCK_DGRAM:
raise InvalidSocketError("datagram_socket is not of " +
"type SOCK_DGRAM")
try:
datagram_socket.getsockname()
except:
raise InvalidSocketError("datagram_socket is unbound")
try:
datagram_socket.getpeername()
except:
pass
else:
raise InvalidSocketError("datagram_socket is connected")
self.datagram_socket = datagram_socket
self.payload = ""
self.payload_peer_address = None
self.connections = WeakValueDictionary()
def get_connection(self, address):
"""Create or retrieve a muxed connection
Arguments:
address -- a peer endpoint in IPv4/v6 address format; None refers
to the connection for unknown peers
Return:
a bound, connected datagram socket instance
"""
if self.connections.has_key(address):
return self.connections[address]
# We need a new datagram socket on a dynamically assigned ephemeral port
conn = socket.socket(self._forwarding_socket.family,
self._forwarding_socket.type,
self._forwarding_socket.proto)
conn.bind((self._forwarding_socket.getsockname()[0], 0))
conn.connect(self._forwarding_socket.getsockname())
if not address:
conn.setblocking(0)
self.connections[address] = conn
_logger.debug("Created new connection for address: %s", address)
return conn
def remove_connection(self, address):
"""Remove a muxed connection
Arguments:
address -- an address that was previously returned by the service
method and whose connection has not yet been removed
Return:
the socket object whose connection has been removed
"""
return self.connections.pop(address)
def forward(self):
"""Forward a stored datagram
When the service method returns the address of a new peer, it holds
the datagram from that peer in this instance. In this case, this
method will perform the forwarding step. The target connection is the
one associated with address None if get_connection has not been called
since the service method returned the new peer's address, and the
connection associated with the new peer's address if it has.
"""
assert self.payload
assert self.payload_peer_address
if self.connections.has_key(self.payload_peer_address):
conn = self.connections[self.payload_peer_address]
default = False
else:
conn = self.connections[None] # propagate exception if not created
default = True
_logger.debug("Forwarding datagram from peer: %s, default: %s",
self.payload_peer_address, default)
self._forwarding_socket.sendto(self.payload, conn.getsockname())
self.payload = ""
self.payload_peer_address = None
|
rbit/pydtls | dtls/demux/router.py | UDPDemux.forward | python | def forward(self):
assert self.payload
assert self.payload_peer_address
if self.connections.has_key(self.payload_peer_address):
conn = self.connections[self.payload_peer_address]
default = False
else:
conn = self.connections[None] # propagate exception if not created
default = True
_logger.debug("Forwarding datagram from peer: %s, default: %s",
self.payload_peer_address, default)
self._forwarding_socket.sendto(self.payload, conn.getsockname())
self.payload = ""
self.payload_peer_address = None | Forward a stored datagram
When the service method returns the address of a new peer, it holds
the datagram from that peer in this instance. In this case, this
method will perform the forwarding step. The target connection is the
one associated with address None if get_connection has not been called
since the service method returned the new peer's address, and the
connection associated with the new peer's address if it has. | train | https://github.com/rbit/pydtls/blob/41a71fccd990347d0de5f42418fea1e4e733359c/dtls/demux/router.py#L166-L189 | null | class UDPDemux(object):
"""Explicitly routing UDP demux
This class implements a demux that forwards packets from the root
socket to sockets belonging to connections. It does this whenever its
service method is invoked.
Methods:
remove_connection -- remove an existing connection
service -- distribute datagrams from the root socket to connections
forward -- forward a stored datagram to a connection
"""
_forwarding_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
_forwarding_socket.bind(('127.0.0.1', 0))
def __init__(self, datagram_socket):
"""Constructor
Arguments:
datagram_socket -- the root socket; this must be a bound, unconnected
datagram socket
"""
if datagram_socket.type != socket.SOCK_DGRAM:
raise InvalidSocketError("datagram_socket is not of " +
"type SOCK_DGRAM")
try:
datagram_socket.getsockname()
except:
raise InvalidSocketError("datagram_socket is unbound")
try:
datagram_socket.getpeername()
except:
pass
else:
raise InvalidSocketError("datagram_socket is connected")
self.datagram_socket = datagram_socket
self.payload = ""
self.payload_peer_address = None
self.connections = WeakValueDictionary()
def get_connection(self, address):
"""Create or retrieve a muxed connection
Arguments:
address -- a peer endpoint in IPv4/v6 address format; None refers
to the connection for unknown peers
Return:
a bound, connected datagram socket instance
"""
if self.connections.has_key(address):
return self.connections[address]
# We need a new datagram socket on a dynamically assigned ephemeral port
conn = socket.socket(self._forwarding_socket.family,
self._forwarding_socket.type,
self._forwarding_socket.proto)
conn.bind((self._forwarding_socket.getsockname()[0], 0))
conn.connect(self._forwarding_socket.getsockname())
if not address:
conn.setblocking(0)
self.connections[address] = conn
_logger.debug("Created new connection for address: %s", address)
return conn
def remove_connection(self, address):
"""Remove a muxed connection
Arguments:
address -- an address that was previously returned by the service
method and whose connection has not yet been removed
Return:
the socket object whose connection has been removed
"""
return self.connections.pop(address)
def service(self):
"""Service the root socket
Read from the root socket and forward one datagram to a
connection. The call will return without forwarding data
if any of the following occurs:
* An error is encountered while reading from the root socket
* Reading from the root socket times out
* The root socket is non-blocking and has no data available
* An empty payload is received
* A non-empty payload is received from an unknown peer (a peer
for which get_connection has not yet been called); in this case,
the payload is held by this instance and will be forwarded when
the forward method is called
Return:
if the datagram received was from a new peer, then the peer's
address; otherwise None
"""
self.payload, self.payload_peer_address = \
self.datagram_socket.recvfrom(UDP_MAX_DGRAM_LENGTH)
_logger.debug("Received datagram from peer: %s",
self.payload_peer_address)
if not self.payload:
self.payload_peer_address = None
return
if self.connections.has_key(self.payload_peer_address):
self.forward()
else:
return self.payload_peer_address
|
itamarst/crochet | examples/scheduling.py | _ExchangeRate.start | python | def start(self):
self._lc = LoopingCall(self._download)
# Run immediately, and then every 30 seconds:
self._lc.start(30, now=True) | Start the background process. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/examples/scheduling.py#L42-L46 | null | class _ExchangeRate(object):
"""Download an exchange rate from Yahoo Finance using Twisted."""
def __init__(self, name):
self._value = None
self._name = name
# External API:
def latest_value(self):
"""Return the latest exchange rate value.
May be None if no value is available.
"""
return self._value
def _download(self):
"""Download the page."""
print("Downloading!")
def parse(result):
print("Got %r back from Yahoo." % (result,))
values = result.strip().split(",")
self._value = float(values[1])
d = getPage(
"http://download.finance.yahoo.com/d/quotes.csv?e=.csv&f=c4l1&s=%s=X"
% (self._name,))
d.addCallback(parse)
d.addErrback(log.err)
return d
|
itamarst/crochet | examples/scheduling.py | _ExchangeRate._download | python | def _download(self):
print("Downloading!")
def parse(result):
print("Got %r back from Yahoo." % (result,))
values = result.strip().split(",")
self._value = float(values[1])
d = getPage(
"http://download.finance.yahoo.com/d/quotes.csv?e=.csv&f=c4l1&s=%s=X"
% (self._name,))
d.addCallback(parse)
d.addErrback(log.err)
return d | Download the page. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/examples/scheduling.py#L48-L60 | null | class _ExchangeRate(object):
"""Download an exchange rate from Yahoo Finance using Twisted."""
def __init__(self, name):
self._value = None
self._name = name
# External API:
def latest_value(self):
"""Return the latest exchange rate value.
May be None if no value is available.
"""
return self._value
def start(self):
"""Start the background process."""
self._lc = LoopingCall(self._download)
# Run immediately, and then every 30 seconds:
self._lc.start(30, now=True)
|
itamarst/crochet | crochet/_eventloop.py | ResultRegistry.register | python | def register(self, result):
if self._stopped:
raise ReactorStopped()
self._results.add(result) | Register an EventualResult.
May be called in any thread. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L80-L88 | null | class ResultRegistry(object):
"""
Keep track of EventualResults.
Once the reactor has shutdown:
1. Registering new EventualResult instances is an error, since no results
will ever become available.
2. Already registered EventualResult instances are "fired" with a
ReactorStopped exception to unblock any remaining EventualResult.wait()
calls.
"""
def __init__(self):
self._results = WeakSet()
self._stopped = False
self._lock = threading.Lock()
@synchronized
@synchronized
def stop(self):
"""
Indicate no more results will get pushed into EventualResults, since
the reactor has stopped.
This should be called in the reactor thread.
"""
self._stopped = True
for result in self._results:
result._set_result(Failure(ReactorStopped()))
|
itamarst/crochet | crochet/_eventloop.py | ResultRegistry.stop | python | def stop(self):
self._stopped = True
for result in self._results:
result._set_result(Failure(ReactorStopped())) | Indicate no more results will get pushed into EventualResults, since
the reactor has stopped.
This should be called in the reactor thread. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L91-L100 | null | class ResultRegistry(object):
"""
Keep track of EventualResults.
Once the reactor has shutdown:
1. Registering new EventualResult instances is an error, since no results
will ever become available.
2. Already registered EventualResult instances are "fired" with a
ReactorStopped exception to unblock any remaining EventualResult.wait()
calls.
"""
def __init__(self):
self._results = WeakSet()
self._stopped = False
self._lock = threading.Lock()
@synchronized
def register(self, result):
"""
Register an EventualResult.
May be called in any thread.
"""
if self._stopped:
raise ReactorStopped()
self._results.add(result)
@synchronized
|
itamarst/crochet | crochet/_eventloop.py | EventualResult._connect_deferred | python | def _connect_deferred(self, deferred):
self._deferred = deferred
# Because we use __del__, we need to make sure there are no cycles
# involving this object, which is why we use a weakref:
def put(result, eventual=weakref.ref(self)):
eventual = eventual()
if eventual:
eventual._set_result(result)
else:
err(result, "Unhandled error in EventualResult")
deferred.addBoth(put) | Hook up the Deferred that that this will be the result of.
Should only be run in Twisted thread, and only called once. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L127-L144 | null | class EventualResult(object):
"""
A blocking interface to Deferred results.
This allows you to access results from Twisted operations that may not be
available immediately, using the wait() method.
In general you should not create these directly; instead use functions
decorated with @run_in_reactor.
"""
def __init__(self, deferred, _reactor):
"""
The deferred parameter should be a Deferred or None indicating
_connect_deferred will be called separately later.
"""
self._deferred = deferred
self._reactor = _reactor
self._value = None
self._result_retrieved = False
self._result_set = threading.Event()
if deferred is not None:
self._connect_deferred(deferred)
def _set_result(self, result):
"""
Set the result of the EventualResult, if not already set.
This can only happen in the reactor thread, either as a result of
Deferred firing, or as a result of ResultRegistry.stop(). So, no need
for thread-safety.
"""
if self._result_set.isSet():
return
self._value = result
self._result_set.set()
def __del__(self):
if self._result_retrieved or not self._result_set.isSet():
return
if isinstance(self._value, Failure):
err(self._value, "Unhandled error in EventualResult")
def cancel(self):
"""
Try to cancel the operation by cancelling the underlying Deferred.
Cancellation of the operation may or may not happen depending on
underlying cancellation support and whether the operation has already
finished. In any case, however, the underlying Deferred will be fired.
Multiple calls will have no additional effect.
"""
self._reactor.callFromThread(lambda: self._deferred.cancel())
def _result(self, timeout=None):
"""
Return the result, if available.
It may take an unknown amount of time to return the result, so a
timeout option is provided. If the given number of seconds pass with
no result, a TimeoutError will be thrown.
If a previous call timed out, additional calls to this function will
still wait for a result and return it if available. If a result was
returned on one call, additional calls will return/raise the same
result.
"""
if timeout is None:
warnings.warn(
"Unlimited timeouts are deprecated.",
DeprecationWarning,
stacklevel=3)
# Queue.get(None) won't get interrupted by Ctrl-C...
timeout = 2**31
self._result_set.wait(timeout)
# In Python 2.6 we can't rely on the return result of wait(), so we
# have to check manually:
if not self._result_set.is_set():
raise TimeoutError()
self._result_retrieved = True
return self._value
def wait(self, timeout=None):
"""
Return the result, or throw the exception if result is a failure.
It may take an unknown amount of time to return the result, so a
timeout option is provided. If the given number of seconds pass with
no result, a TimeoutError will be thrown.
If a previous call timed out, additional calls to this function will
still wait for a result and return it if available. If a result was
returned or raised on one call, additional calls will return/raise the
same result.
"""
if threadable.isInIOThread():
raise RuntimeError(
"EventualResult.wait() must not be run in the reactor thread.")
if imp.lock_held():
try:
imp.release_lock()
except RuntimeError:
# The lock is held by some other thread. We should be safe
# to continue.
pass
else:
# If EventualResult.wait() is run during module import, if the
# Twisted code that is being run also imports something the
# result will be a deadlock. Even if that is not an issue it
# would prevent importing in other threads until the call
# returns.
raise RuntimeError(
"EventualResult.wait() must not be run at module "
"import time.")
result = self._result(timeout)
if isinstance(result, Failure):
result.raiseException()
return result
def stash(self):
"""
Store the EventualResult in memory for later retrieval.
Returns a integer uid which can be passed to crochet.retrieve_result()
to retrieve the instance later on.
"""
return _store.store(self)
def original_failure(self):
"""
Return the underlying Failure object, if the result is an error.
If no result is yet available, or the result was not an error, None is
returned.
This method is useful if you want to get the original traceback for an
error result.
"""
try:
result = self._result(0.0)
except TimeoutError:
return None
if isinstance(result, Failure):
return result
else:
return None
|
itamarst/crochet | crochet/_eventloop.py | EventualResult._set_result | python | def _set_result(self, result):
if self._result_set.isSet():
return
self._value = result
self._result_set.set() | Set the result of the EventualResult, if not already set.
This can only happen in the reactor thread, either as a result of
Deferred firing, or as a result of ResultRegistry.stop(). So, no need
for thread-safety. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L146-L157 | null | class EventualResult(object):
"""
A blocking interface to Deferred results.
This allows you to access results from Twisted operations that may not be
available immediately, using the wait() method.
In general you should not create these directly; instead use functions
decorated with @run_in_reactor.
"""
def __init__(self, deferred, _reactor):
"""
The deferred parameter should be a Deferred or None indicating
_connect_deferred will be called separately later.
"""
self._deferred = deferred
self._reactor = _reactor
self._value = None
self._result_retrieved = False
self._result_set = threading.Event()
if deferred is not None:
self._connect_deferred(deferred)
def _connect_deferred(self, deferred):
"""
Hook up the Deferred that that this will be the result of.
Should only be run in Twisted thread, and only called once.
"""
self._deferred = deferred
# Because we use __del__, we need to make sure there are no cycles
# involving this object, which is why we use a weakref:
def put(result, eventual=weakref.ref(self)):
eventual = eventual()
if eventual:
eventual._set_result(result)
else:
err(result, "Unhandled error in EventualResult")
deferred.addBoth(put)
def __del__(self):
if self._result_retrieved or not self._result_set.isSet():
return
if isinstance(self._value, Failure):
err(self._value, "Unhandled error in EventualResult")
def cancel(self):
"""
Try to cancel the operation by cancelling the underlying Deferred.
Cancellation of the operation may or may not happen depending on
underlying cancellation support and whether the operation has already
finished. In any case, however, the underlying Deferred will be fired.
Multiple calls will have no additional effect.
"""
self._reactor.callFromThread(lambda: self._deferred.cancel())
def _result(self, timeout=None):
"""
Return the result, if available.
It may take an unknown amount of time to return the result, so a
timeout option is provided. If the given number of seconds pass with
no result, a TimeoutError will be thrown.
If a previous call timed out, additional calls to this function will
still wait for a result and return it if available. If a result was
returned on one call, additional calls will return/raise the same
result.
"""
if timeout is None:
warnings.warn(
"Unlimited timeouts are deprecated.",
DeprecationWarning,
stacklevel=3)
# Queue.get(None) won't get interrupted by Ctrl-C...
timeout = 2**31
self._result_set.wait(timeout)
# In Python 2.6 we can't rely on the return result of wait(), so we
# have to check manually:
if not self._result_set.is_set():
raise TimeoutError()
self._result_retrieved = True
return self._value
def wait(self, timeout=None):
"""
Return the result, or throw the exception if result is a failure.
It may take an unknown amount of time to return the result, so a
timeout option is provided. If the given number of seconds pass with
no result, a TimeoutError will be thrown.
If a previous call timed out, additional calls to this function will
still wait for a result and return it if available. If a result was
returned or raised on one call, additional calls will return/raise the
same result.
"""
if threadable.isInIOThread():
raise RuntimeError(
"EventualResult.wait() must not be run in the reactor thread.")
if imp.lock_held():
try:
imp.release_lock()
except RuntimeError:
# The lock is held by some other thread. We should be safe
# to continue.
pass
else:
# If EventualResult.wait() is run during module import, if the
# Twisted code that is being run also imports something the
# result will be a deadlock. Even if that is not an issue it
# would prevent importing in other threads until the call
# returns.
raise RuntimeError(
"EventualResult.wait() must not be run at module "
"import time.")
result = self._result(timeout)
if isinstance(result, Failure):
result.raiseException()
return result
def stash(self):
"""
Store the EventualResult in memory for later retrieval.
Returns a integer uid which can be passed to crochet.retrieve_result()
to retrieve the instance later on.
"""
return _store.store(self)
def original_failure(self):
"""
Return the underlying Failure object, if the result is an error.
If no result is yet available, or the result was not an error, None is
returned.
This method is useful if you want to get the original traceback for an
error result.
"""
try:
result = self._result(0.0)
except TimeoutError:
return None
if isinstance(result, Failure):
return result
else:
return None
|
itamarst/crochet | crochet/_eventloop.py | EventualResult._result | python | def _result(self, timeout=None):
if timeout is None:
warnings.warn(
"Unlimited timeouts are deprecated.",
DeprecationWarning,
stacklevel=3)
# Queue.get(None) won't get interrupted by Ctrl-C...
timeout = 2**31
self._result_set.wait(timeout)
# In Python 2.6 we can't rely on the return result of wait(), so we
# have to check manually:
if not self._result_set.is_set():
raise TimeoutError()
self._result_retrieved = True
return self._value | Return the result, if available.
It may take an unknown amount of time to return the result, so a
timeout option is provided. If the given number of seconds pass with
no result, a TimeoutError will be thrown.
If a previous call timed out, additional calls to this function will
still wait for a result and return it if available. If a result was
returned on one call, additional calls will return/raise the same
result. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L177-L203 | null | class EventualResult(object):
"""
A blocking interface to Deferred results.
This allows you to access results from Twisted operations that may not be
available immediately, using the wait() method.
In general you should not create these directly; instead use functions
decorated with @run_in_reactor.
"""
def __init__(self, deferred, _reactor):
"""
The deferred parameter should be a Deferred or None indicating
_connect_deferred will be called separately later.
"""
self._deferred = deferred
self._reactor = _reactor
self._value = None
self._result_retrieved = False
self._result_set = threading.Event()
if deferred is not None:
self._connect_deferred(deferred)
def _connect_deferred(self, deferred):
"""
Hook up the Deferred that that this will be the result of.
Should only be run in Twisted thread, and only called once.
"""
self._deferred = deferred
# Because we use __del__, we need to make sure there are no cycles
# involving this object, which is why we use a weakref:
def put(result, eventual=weakref.ref(self)):
eventual = eventual()
if eventual:
eventual._set_result(result)
else:
err(result, "Unhandled error in EventualResult")
deferred.addBoth(put)
def _set_result(self, result):
"""
Set the result of the EventualResult, if not already set.
This can only happen in the reactor thread, either as a result of
Deferred firing, or as a result of ResultRegistry.stop(). So, no need
for thread-safety.
"""
if self._result_set.isSet():
return
self._value = result
self._result_set.set()
def __del__(self):
if self._result_retrieved or not self._result_set.isSet():
return
if isinstance(self._value, Failure):
err(self._value, "Unhandled error in EventualResult")
def cancel(self):
"""
Try to cancel the operation by cancelling the underlying Deferred.
Cancellation of the operation may or may not happen depending on
underlying cancellation support and whether the operation has already
finished. In any case, however, the underlying Deferred will be fired.
Multiple calls will have no additional effect.
"""
self._reactor.callFromThread(lambda: self._deferred.cancel())
def wait(self, timeout=None):
"""
Return the result, or throw the exception if result is a failure.
It may take an unknown amount of time to return the result, so a
timeout option is provided. If the given number of seconds pass with
no result, a TimeoutError will be thrown.
If a previous call timed out, additional calls to this function will
still wait for a result and return it if available. If a result was
returned or raised on one call, additional calls will return/raise the
same result.
"""
if threadable.isInIOThread():
raise RuntimeError(
"EventualResult.wait() must not be run in the reactor thread.")
if imp.lock_held():
try:
imp.release_lock()
except RuntimeError:
# The lock is held by some other thread. We should be safe
# to continue.
pass
else:
# If EventualResult.wait() is run during module import, if the
# Twisted code that is being run also imports something the
# result will be a deadlock. Even if that is not an issue it
# would prevent importing in other threads until the call
# returns.
raise RuntimeError(
"EventualResult.wait() must not be run at module "
"import time.")
result = self._result(timeout)
if isinstance(result, Failure):
result.raiseException()
return result
def stash(self):
"""
Store the EventualResult in memory for later retrieval.
Returns a integer uid which can be passed to crochet.retrieve_result()
to retrieve the instance later on.
"""
return _store.store(self)
def original_failure(self):
"""
Return the underlying Failure object, if the result is an error.
If no result is yet available, or the result was not an error, None is
returned.
This method is useful if you want to get the original traceback for an
error result.
"""
try:
result = self._result(0.0)
except TimeoutError:
return None
if isinstance(result, Failure):
return result
else:
return None
|
itamarst/crochet | crochet/_eventloop.py | EventualResult.wait | python | def wait(self, timeout=None):
if threadable.isInIOThread():
raise RuntimeError(
"EventualResult.wait() must not be run in the reactor thread.")
if imp.lock_held():
try:
imp.release_lock()
except RuntimeError:
# The lock is held by some other thread. We should be safe
# to continue.
pass
else:
# If EventualResult.wait() is run during module import, if the
# Twisted code that is being run also imports something the
# result will be a deadlock. Even if that is not an issue it
# would prevent importing in other threads until the call
# returns.
raise RuntimeError(
"EventualResult.wait() must not be run at module "
"import time.")
result = self._result(timeout)
if isinstance(result, Failure):
result.raiseException()
return result | Return the result, or throw the exception if result is a failure.
It may take an unknown amount of time to return the result, so a
timeout option is provided. If the given number of seconds pass with
no result, a TimeoutError will be thrown.
If a previous call timed out, additional calls to this function will
still wait for a result and return it if available. If a result was
returned or raised on one call, additional calls will return/raise the
same result. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L205-L242 | [
"def _result(self, timeout=None):\n \"\"\"\n Return the result, if available.\n\n It may take an unknown amount of time to return the result, so a\n timeout option is provided. If the given number of seconds pass with\n no result, a TimeoutError will be thrown.\n\n If a previous call timed out, ad... | class EventualResult(object):
"""
A blocking interface to Deferred results.
This allows you to access results from Twisted operations that may not be
available immediately, using the wait() method.
In general you should not create these directly; instead use functions
decorated with @run_in_reactor.
"""
def __init__(self, deferred, _reactor):
"""
The deferred parameter should be a Deferred or None indicating
_connect_deferred will be called separately later.
"""
self._deferred = deferred
self._reactor = _reactor
self._value = None
self._result_retrieved = False
self._result_set = threading.Event()
if deferred is not None:
self._connect_deferred(deferred)
def _connect_deferred(self, deferred):
"""
Hook up the Deferred that that this will be the result of.
Should only be run in Twisted thread, and only called once.
"""
self._deferred = deferred
# Because we use __del__, we need to make sure there are no cycles
# involving this object, which is why we use a weakref:
def put(result, eventual=weakref.ref(self)):
eventual = eventual()
if eventual:
eventual._set_result(result)
else:
err(result, "Unhandled error in EventualResult")
deferred.addBoth(put)
def _set_result(self, result):
"""
Set the result of the EventualResult, if not already set.
This can only happen in the reactor thread, either as a result of
Deferred firing, or as a result of ResultRegistry.stop(). So, no need
for thread-safety.
"""
if self._result_set.isSet():
return
self._value = result
self._result_set.set()
def __del__(self):
if self._result_retrieved or not self._result_set.isSet():
return
if isinstance(self._value, Failure):
err(self._value, "Unhandled error in EventualResult")
def cancel(self):
"""
Try to cancel the operation by cancelling the underlying Deferred.
Cancellation of the operation may or may not happen depending on
underlying cancellation support and whether the operation has already
finished. In any case, however, the underlying Deferred will be fired.
Multiple calls will have no additional effect.
"""
self._reactor.callFromThread(lambda: self._deferred.cancel())
def _result(self, timeout=None):
"""
Return the result, if available.
It may take an unknown amount of time to return the result, so a
timeout option is provided. If the given number of seconds pass with
no result, a TimeoutError will be thrown.
If a previous call timed out, additional calls to this function will
still wait for a result and return it if available. If a result was
returned on one call, additional calls will return/raise the same
result.
"""
if timeout is None:
warnings.warn(
"Unlimited timeouts are deprecated.",
DeprecationWarning,
stacklevel=3)
# Queue.get(None) won't get interrupted by Ctrl-C...
timeout = 2**31
self._result_set.wait(timeout)
# In Python 2.6 we can't rely on the return result of wait(), so we
# have to check manually:
if not self._result_set.is_set():
raise TimeoutError()
self._result_retrieved = True
return self._value
def stash(self):
"""
Store the EventualResult in memory for later retrieval.
Returns a integer uid which can be passed to crochet.retrieve_result()
to retrieve the instance later on.
"""
return _store.store(self)
def original_failure(self):
"""
Return the underlying Failure object, if the result is an error.
If no result is yet available, or the result was not an error, None is
returned.
This method is useful if you want to get the original traceback for an
error result.
"""
try:
result = self._result(0.0)
except TimeoutError:
return None
if isinstance(result, Failure):
return result
else:
return None
|
itamarst/crochet | crochet/_eventloop.py | EventualResult.original_failure | python | def original_failure(self):
try:
result = self._result(0.0)
except TimeoutError:
return None
if isinstance(result, Failure):
return result
else:
return None | Return the underlying Failure object, if the result is an error.
If no result is yet available, or the result was not an error, None is
returned.
This method is useful if you want to get the original traceback for an
error result. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L253-L270 | [
"def _result(self, timeout=None):\n \"\"\"\n Return the result, if available.\n\n It may take an unknown amount of time to return the result, so a\n timeout option is provided. If the given number of seconds pass with\n no result, a TimeoutError will be thrown.\n\n If a previous call timed out, ad... | class EventualResult(object):
"""
A blocking interface to Deferred results.
This allows you to access results from Twisted operations that may not be
available immediately, using the wait() method.
In general you should not create these directly; instead use functions
decorated with @run_in_reactor.
"""
def __init__(self, deferred, _reactor):
"""
The deferred parameter should be a Deferred or None indicating
_connect_deferred will be called separately later.
"""
self._deferred = deferred
self._reactor = _reactor
self._value = None
self._result_retrieved = False
self._result_set = threading.Event()
if deferred is not None:
self._connect_deferred(deferred)
def _connect_deferred(self, deferred):
"""
Hook up the Deferred that that this will be the result of.
Should only be run in Twisted thread, and only called once.
"""
self._deferred = deferred
# Because we use __del__, we need to make sure there are no cycles
# involving this object, which is why we use a weakref:
def put(result, eventual=weakref.ref(self)):
eventual = eventual()
if eventual:
eventual._set_result(result)
else:
err(result, "Unhandled error in EventualResult")
deferred.addBoth(put)
def _set_result(self, result):
"""
Set the result of the EventualResult, if not already set.
This can only happen in the reactor thread, either as a result of
Deferred firing, or as a result of ResultRegistry.stop(). So, no need
for thread-safety.
"""
if self._result_set.isSet():
return
self._value = result
self._result_set.set()
def __del__(self):
if self._result_retrieved or not self._result_set.isSet():
return
if isinstance(self._value, Failure):
err(self._value, "Unhandled error in EventualResult")
def cancel(self):
"""
Try to cancel the operation by cancelling the underlying Deferred.
Cancellation of the operation may or may not happen depending on
underlying cancellation support and whether the operation has already
finished. In any case, however, the underlying Deferred will be fired.
Multiple calls will have no additional effect.
"""
self._reactor.callFromThread(lambda: self._deferred.cancel())
def _result(self, timeout=None):
"""
Return the result, if available.
It may take an unknown amount of time to return the result, so a
timeout option is provided. If the given number of seconds pass with
no result, a TimeoutError will be thrown.
If a previous call timed out, additional calls to this function will
still wait for a result and return it if available. If a result was
returned on one call, additional calls will return/raise the same
result.
"""
if timeout is None:
warnings.warn(
"Unlimited timeouts are deprecated.",
DeprecationWarning,
stacklevel=3)
# Queue.get(None) won't get interrupted by Ctrl-C...
timeout = 2**31
self._result_set.wait(timeout)
# In Python 2.6 we can't rely on the return result of wait(), so we
# have to check manually:
if not self._result_set.is_set():
raise TimeoutError()
self._result_retrieved = True
return self._value
def wait(self, timeout=None):
"""
Return the result, or throw the exception if result is a failure.
It may take an unknown amount of time to return the result, so a
timeout option is provided. If the given number of seconds pass with
no result, a TimeoutError will be thrown.
If a previous call timed out, additional calls to this function will
still wait for a result and return it if available. If a result was
returned or raised on one call, additional calls will return/raise the
same result.
"""
if threadable.isInIOThread():
raise RuntimeError(
"EventualResult.wait() must not be run in the reactor thread.")
if imp.lock_held():
try:
imp.release_lock()
except RuntimeError:
# The lock is held by some other thread. We should be safe
# to continue.
pass
else:
# If EventualResult.wait() is run during module import, if the
# Twisted code that is being run also imports something the
# result will be a deadlock. Even if that is not an issue it
# would prevent importing in other threads until the call
# returns.
raise RuntimeError(
"EventualResult.wait() must not be run at module "
"import time.")
result = self._result(timeout)
if isinstance(result, Failure):
result.raiseException()
return result
def stash(self):
"""
Store the EventualResult in memory for later retrieval.
Returns a integer uid which can be passed to crochet.retrieve_result()
to retrieve the instance later on.
"""
return _store.store(self)
|
itamarst/crochet | crochet/_eventloop.py | EventLoop._startReapingProcesses | python | def _startReapingProcesses(self):
lc = LoopingCall(self._reapAllProcesses)
lc.clock = self._reactor
lc.start(0.1, False) | Start a LoopingCall that calls reapAllProcesses. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L357-L363 | null | class EventLoop(object):
"""
Initialization infrastructure for running a reactor in a thread.
"""
def __init__(
self,
reactorFactory,
atexit_register,
startLoggingWithObserver=None,
watchdog_thread=None,
reapAllProcesses=None
):
"""
reactorFactory: Zero-argument callable that returns a reactor.
atexit_register: atexit.register, or look-alike.
startLoggingWithObserver: Either None, or
twisted.python.log.startLoggingWithObserver or lookalike.
watchdog_thread: crochet._shutdown.Watchdog instance, or None.
reapAllProcesses: twisted.internet.process.reapAllProcesses or
lookalike.
"""
self._reactorFactory = reactorFactory
self._atexit_register = atexit_register
self._startLoggingWithObserver = startLoggingWithObserver
self._started = False
self._lock = threading.Lock()
self._watchdog_thread = watchdog_thread
self._reapAllProcesses = reapAllProcesses
def _common_setup(self):
"""
The minimal amount of setup done by both setup() and no_setup().
"""
self._started = True
self._reactor = self._reactorFactory()
self._registry = ResultRegistry()
# We want to unblock EventualResult regardless of how the reactor is
# run, so we always register this:
self._reactor.addSystemEventTrigger(
"before", "shutdown", self._registry.stop)
@synchronized
def setup(self):
"""
Initialize the crochet library.
This starts the reactor in a thread, and connect's Twisted's logs to
Python's standard library logging module.
This must be called at least once before the library can be used, and
can be called multiple times.
"""
if self._started:
return
self._common_setup()
if platform.type == "posix":
self._reactor.callFromThread(self._startReapingProcesses)
if self._startLoggingWithObserver:
observer = ThreadLogObserver(PythonLoggingObserver().emit)
def start():
# Twisted is going to override warnings.showwarning; let's
# make sure that has no effect:
from twisted.python import log
original = log.showwarning
log.showwarning = warnings.showwarning
self._startLoggingWithObserver(observer, False)
log.showwarning = original
self._reactor.callFromThread(start)
# We only want to stop the logging thread once the reactor has
# shut down:
self._reactor.addSystemEventTrigger(
"after", "shutdown", observer.stop)
t = threading.Thread(
target=lambda: self._reactor.run(installSignalHandlers=False),
name="CrochetReactor")
t.start()
self._atexit_register(self._reactor.callFromThread, self._reactor.stop)
self._atexit_register(_store.log_errors)
if self._watchdog_thread is not None:
self._watchdog_thread.start()
@synchronized
def no_setup(self):
"""
Initialize the crochet library with no side effects.
No reactor will be started, logging is uneffected, etc.. Future calls
to setup() will have no effect. This is useful for applications that
intend to run Twisted's reactor themselves, and so do not want
libraries using crochet to attempt to start it on their own.
If no_setup() is called after setup(), a RuntimeError is raised.
"""
if self._started:
raise RuntimeError(
"no_setup() is intended to be called once, by a"
" Twisted application, before any libraries "
"using crochet are imported and call setup().")
self._common_setup()
@wrapt.decorator
def _run_in_reactor(self, function, _, args, kwargs):
"""
Implementation: A decorator that ensures the wrapped function runs in
the reactor thread.
When the wrapped function is called, an EventualResult is returned.
"""
def runs_in_reactor(result, args, kwargs):
d = maybeDeferred(function, *args, **kwargs)
result._connect_deferred(d)
result = EventualResult(None, self._reactor)
self._registry.register(result)
self._reactor.callFromThread(runs_in_reactor, result, args, kwargs)
return result
def run_in_reactor(self, function):
"""
A decorator that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, an EventualResult is returned.
"""
result = self._run_in_reactor(function)
# Backwards compatibility; use __wrapped__ instead.
try:
result.wrapped_function = function
except AttributeError:
pass
return result
def wait_for_reactor(self, function):
"""
DEPRECATED, use wait_for(timeout) instead.
A decorator that ensures the wrapped function runs in the reactor
thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently.
"""
warnings.warn(
"@wait_for_reactor is deprecated, use @wait_for instead",
DeprecationWarning,
stacklevel=2)
# This will timeout, in theory. In practice the process will be dead
# long before that.
return self.wait_for(2**31)(function)
def wait_for(self, timeout):
"""
A decorator factory that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently. Calls will
timeout after the given number of seconds (a float), raising a
crochet.TimeoutError, and cancelling the Deferred being waited on.
"""
def decorator(function):
@wrapt.decorator
def wrapper(function, _, args, kwargs):
@self.run_in_reactor
def run():
return function(*args, **kwargs)
eventual_result = run()
try:
return eventual_result.wait(timeout)
except TimeoutError:
eventual_result.cancel()
raise
result = wrapper(function)
# Expose underling function for testing purposes; this attribute is
# deprecated, use __wrapped__ instead:
try:
result.wrapped_function = function
except AttributeError:
pass
return result
return decorator
def in_reactor(self, function):
"""
DEPRECATED, use run_in_reactor.
A decorator that ensures the wrapped function runs in the reactor
thread.
The wrapped function will get the reactor passed in as a first
argument, in addition to any arguments it is called with.
When the wrapped function is called, an EventualResult is returned.
"""
warnings.warn(
"@in_reactor is deprecated, use @run_in_reactor",
DeprecationWarning,
stacklevel=2)
@self.run_in_reactor
@wraps(function)
def add_reactor(*args, **kwargs):
return function(self._reactor, *args, **kwargs)
return add_reactor
|
itamarst/crochet | crochet/_eventloop.py | EventLoop._common_setup | python | def _common_setup(self):
self._started = True
self._reactor = self._reactorFactory()
self._registry = ResultRegistry()
# We want to unblock EventualResult regardless of how the reactor is
# run, so we always register this:
self._reactor.addSystemEventTrigger(
"before", "shutdown", self._registry.stop) | The minimal amount of setup done by both setup() and no_setup(). | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L365-L375 | null | class EventLoop(object):
"""
Initialization infrastructure for running a reactor in a thread.
"""
def __init__(
self,
reactorFactory,
atexit_register,
startLoggingWithObserver=None,
watchdog_thread=None,
reapAllProcesses=None
):
"""
reactorFactory: Zero-argument callable that returns a reactor.
atexit_register: atexit.register, or look-alike.
startLoggingWithObserver: Either None, or
twisted.python.log.startLoggingWithObserver or lookalike.
watchdog_thread: crochet._shutdown.Watchdog instance, or None.
reapAllProcesses: twisted.internet.process.reapAllProcesses or
lookalike.
"""
self._reactorFactory = reactorFactory
self._atexit_register = atexit_register
self._startLoggingWithObserver = startLoggingWithObserver
self._started = False
self._lock = threading.Lock()
self._watchdog_thread = watchdog_thread
self._reapAllProcesses = reapAllProcesses
def _startReapingProcesses(self):
"""
Start a LoopingCall that calls reapAllProcesses.
"""
lc = LoopingCall(self._reapAllProcesses)
lc.clock = self._reactor
lc.start(0.1, False)
@synchronized
def setup(self):
"""
Initialize the crochet library.
This starts the reactor in a thread, and connect's Twisted's logs to
Python's standard library logging module.
This must be called at least once before the library can be used, and
can be called multiple times.
"""
if self._started:
return
self._common_setup()
if platform.type == "posix":
self._reactor.callFromThread(self._startReapingProcesses)
if self._startLoggingWithObserver:
observer = ThreadLogObserver(PythonLoggingObserver().emit)
def start():
# Twisted is going to override warnings.showwarning; let's
# make sure that has no effect:
from twisted.python import log
original = log.showwarning
log.showwarning = warnings.showwarning
self._startLoggingWithObserver(observer, False)
log.showwarning = original
self._reactor.callFromThread(start)
# We only want to stop the logging thread once the reactor has
# shut down:
self._reactor.addSystemEventTrigger(
"after", "shutdown", observer.stop)
t = threading.Thread(
target=lambda: self._reactor.run(installSignalHandlers=False),
name="CrochetReactor")
t.start()
self._atexit_register(self._reactor.callFromThread, self._reactor.stop)
self._atexit_register(_store.log_errors)
if self._watchdog_thread is not None:
self._watchdog_thread.start()
@synchronized
def no_setup(self):
"""
Initialize the crochet library with no side effects.
No reactor will be started, logging is uneffected, etc.. Future calls
to setup() will have no effect. This is useful for applications that
intend to run Twisted's reactor themselves, and so do not want
libraries using crochet to attempt to start it on their own.
If no_setup() is called after setup(), a RuntimeError is raised.
"""
if self._started:
raise RuntimeError(
"no_setup() is intended to be called once, by a"
" Twisted application, before any libraries "
"using crochet are imported and call setup().")
self._common_setup()
@wrapt.decorator
def _run_in_reactor(self, function, _, args, kwargs):
"""
Implementation: A decorator that ensures the wrapped function runs in
the reactor thread.
When the wrapped function is called, an EventualResult is returned.
"""
def runs_in_reactor(result, args, kwargs):
d = maybeDeferred(function, *args, **kwargs)
result._connect_deferred(d)
result = EventualResult(None, self._reactor)
self._registry.register(result)
self._reactor.callFromThread(runs_in_reactor, result, args, kwargs)
return result
def run_in_reactor(self, function):
"""
A decorator that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, an EventualResult is returned.
"""
result = self._run_in_reactor(function)
# Backwards compatibility; use __wrapped__ instead.
try:
result.wrapped_function = function
except AttributeError:
pass
return result
def wait_for_reactor(self, function):
"""
DEPRECATED, use wait_for(timeout) instead.
A decorator that ensures the wrapped function runs in the reactor
thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently.
"""
warnings.warn(
"@wait_for_reactor is deprecated, use @wait_for instead",
DeprecationWarning,
stacklevel=2)
# This will timeout, in theory. In practice the process will be dead
# long before that.
return self.wait_for(2**31)(function)
def wait_for(self, timeout):
"""
A decorator factory that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently. Calls will
timeout after the given number of seconds (a float), raising a
crochet.TimeoutError, and cancelling the Deferred being waited on.
"""
def decorator(function):
@wrapt.decorator
def wrapper(function, _, args, kwargs):
@self.run_in_reactor
def run():
return function(*args, **kwargs)
eventual_result = run()
try:
return eventual_result.wait(timeout)
except TimeoutError:
eventual_result.cancel()
raise
result = wrapper(function)
# Expose underling function for testing purposes; this attribute is
# deprecated, use __wrapped__ instead:
try:
result.wrapped_function = function
except AttributeError:
pass
return result
return decorator
def in_reactor(self, function):
"""
DEPRECATED, use run_in_reactor.
A decorator that ensures the wrapped function runs in the reactor
thread.
The wrapped function will get the reactor passed in as a first
argument, in addition to any arguments it is called with.
When the wrapped function is called, an EventualResult is returned.
"""
warnings.warn(
"@in_reactor is deprecated, use @run_in_reactor",
DeprecationWarning,
stacklevel=2)
@self.run_in_reactor
@wraps(function)
def add_reactor(*args, **kwargs):
return function(self._reactor, *args, **kwargs)
return add_reactor
|
itamarst/crochet | crochet/_eventloop.py | EventLoop.setup | python | def setup(self):
if self._started:
return
self._common_setup()
if platform.type == "posix":
self._reactor.callFromThread(self._startReapingProcesses)
if self._startLoggingWithObserver:
observer = ThreadLogObserver(PythonLoggingObserver().emit)
def start():
# Twisted is going to override warnings.showwarning; let's
# make sure that has no effect:
from twisted.python import log
original = log.showwarning
log.showwarning = warnings.showwarning
self._startLoggingWithObserver(observer, False)
log.showwarning = original
self._reactor.callFromThread(start)
# We only want to stop the logging thread once the reactor has
# shut down:
self._reactor.addSystemEventTrigger(
"after", "shutdown", observer.stop)
t = threading.Thread(
target=lambda: self._reactor.run(installSignalHandlers=False),
name="CrochetReactor")
t.start()
self._atexit_register(self._reactor.callFromThread, self._reactor.stop)
self._atexit_register(_store.log_errors)
if self._watchdog_thread is not None:
self._watchdog_thread.start() | Initialize the crochet library.
This starts the reactor in a thread, and connect's Twisted's logs to
Python's standard library logging module.
This must be called at least once before the library can be used, and
can be called multiple times. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L378-L418 | [
"def _common_setup(self):\n \"\"\"\n The minimal amount of setup done by both setup() and no_setup().\n \"\"\"\n self._started = True\n self._reactor = self._reactorFactory()\n self._registry = ResultRegistry()\n # We want to unblock EventualResult regardless of how the reactor is\n # run, s... | class EventLoop(object):
"""
Initialization infrastructure for running a reactor in a thread.
"""
def __init__(
self,
reactorFactory,
atexit_register,
startLoggingWithObserver=None,
watchdog_thread=None,
reapAllProcesses=None
):
"""
reactorFactory: Zero-argument callable that returns a reactor.
atexit_register: atexit.register, or look-alike.
startLoggingWithObserver: Either None, or
twisted.python.log.startLoggingWithObserver or lookalike.
watchdog_thread: crochet._shutdown.Watchdog instance, or None.
reapAllProcesses: twisted.internet.process.reapAllProcesses or
lookalike.
"""
self._reactorFactory = reactorFactory
self._atexit_register = atexit_register
self._startLoggingWithObserver = startLoggingWithObserver
self._started = False
self._lock = threading.Lock()
self._watchdog_thread = watchdog_thread
self._reapAllProcesses = reapAllProcesses
def _startReapingProcesses(self):
"""
Start a LoopingCall that calls reapAllProcesses.
"""
lc = LoopingCall(self._reapAllProcesses)
lc.clock = self._reactor
lc.start(0.1, False)
def _common_setup(self):
"""
The minimal amount of setup done by both setup() and no_setup().
"""
self._started = True
self._reactor = self._reactorFactory()
self._registry = ResultRegistry()
# We want to unblock EventualResult regardless of how the reactor is
# run, so we always register this:
self._reactor.addSystemEventTrigger(
"before", "shutdown", self._registry.stop)
@synchronized
@synchronized
def no_setup(self):
"""
Initialize the crochet library with no side effects.
No reactor will be started, logging is uneffected, etc.. Future calls
to setup() will have no effect. This is useful for applications that
intend to run Twisted's reactor themselves, and so do not want
libraries using crochet to attempt to start it on their own.
If no_setup() is called after setup(), a RuntimeError is raised.
"""
if self._started:
raise RuntimeError(
"no_setup() is intended to be called once, by a"
" Twisted application, before any libraries "
"using crochet are imported and call setup().")
self._common_setup()
@wrapt.decorator
def _run_in_reactor(self, function, _, args, kwargs):
"""
Implementation: A decorator that ensures the wrapped function runs in
the reactor thread.
When the wrapped function is called, an EventualResult is returned.
"""
def runs_in_reactor(result, args, kwargs):
d = maybeDeferred(function, *args, **kwargs)
result._connect_deferred(d)
result = EventualResult(None, self._reactor)
self._registry.register(result)
self._reactor.callFromThread(runs_in_reactor, result, args, kwargs)
return result
def run_in_reactor(self, function):
"""
A decorator that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, an EventualResult is returned.
"""
result = self._run_in_reactor(function)
# Backwards compatibility; use __wrapped__ instead.
try:
result.wrapped_function = function
except AttributeError:
pass
return result
def wait_for_reactor(self, function):
"""
DEPRECATED, use wait_for(timeout) instead.
A decorator that ensures the wrapped function runs in the reactor
thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently.
"""
warnings.warn(
"@wait_for_reactor is deprecated, use @wait_for instead",
DeprecationWarning,
stacklevel=2)
# This will timeout, in theory. In practice the process will be dead
# long before that.
return self.wait_for(2**31)(function)
def wait_for(self, timeout):
"""
A decorator factory that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently. Calls will
timeout after the given number of seconds (a float), raising a
crochet.TimeoutError, and cancelling the Deferred being waited on.
"""
def decorator(function):
@wrapt.decorator
def wrapper(function, _, args, kwargs):
@self.run_in_reactor
def run():
return function(*args, **kwargs)
eventual_result = run()
try:
return eventual_result.wait(timeout)
except TimeoutError:
eventual_result.cancel()
raise
result = wrapper(function)
# Expose underling function for testing purposes; this attribute is
# deprecated, use __wrapped__ instead:
try:
result.wrapped_function = function
except AttributeError:
pass
return result
return decorator
def in_reactor(self, function):
"""
DEPRECATED, use run_in_reactor.
A decorator that ensures the wrapped function runs in the reactor
thread.
The wrapped function will get the reactor passed in as a first
argument, in addition to any arguments it is called with.
When the wrapped function is called, an EventualResult is returned.
"""
warnings.warn(
"@in_reactor is deprecated, use @run_in_reactor",
DeprecationWarning,
stacklevel=2)
@self.run_in_reactor
@wraps(function)
def add_reactor(*args, **kwargs):
return function(self._reactor, *args, **kwargs)
return add_reactor
|
itamarst/crochet | crochet/_eventloop.py | EventLoop._run_in_reactor | python | def _run_in_reactor(self, function, _, args, kwargs):
def runs_in_reactor(result, args, kwargs):
d = maybeDeferred(function, *args, **kwargs)
result._connect_deferred(d)
result = EventualResult(None, self._reactor)
self._registry.register(result)
self._reactor.callFromThread(runs_in_reactor, result, args, kwargs)
return result | Implementation: A decorator that ensures the wrapped function runs in
the reactor thread.
When the wrapped function is called, an EventualResult is returned. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L440-L455 | null | class EventLoop(object):
"""
Initialization infrastructure for running a reactor in a thread.
"""
def __init__(
self,
reactorFactory,
atexit_register,
startLoggingWithObserver=None,
watchdog_thread=None,
reapAllProcesses=None
):
"""
reactorFactory: Zero-argument callable that returns a reactor.
atexit_register: atexit.register, or look-alike.
startLoggingWithObserver: Either None, or
twisted.python.log.startLoggingWithObserver or lookalike.
watchdog_thread: crochet._shutdown.Watchdog instance, or None.
reapAllProcesses: twisted.internet.process.reapAllProcesses or
lookalike.
"""
self._reactorFactory = reactorFactory
self._atexit_register = atexit_register
self._startLoggingWithObserver = startLoggingWithObserver
self._started = False
self._lock = threading.Lock()
self._watchdog_thread = watchdog_thread
self._reapAllProcesses = reapAllProcesses
def _startReapingProcesses(self):
"""
Start a LoopingCall that calls reapAllProcesses.
"""
lc = LoopingCall(self._reapAllProcesses)
lc.clock = self._reactor
lc.start(0.1, False)
def _common_setup(self):
"""
The minimal amount of setup done by both setup() and no_setup().
"""
self._started = True
self._reactor = self._reactorFactory()
self._registry = ResultRegistry()
# We want to unblock EventualResult regardless of how the reactor is
# run, so we always register this:
self._reactor.addSystemEventTrigger(
"before", "shutdown", self._registry.stop)
@synchronized
def setup(self):
"""
Initialize the crochet library.
This starts the reactor in a thread, and connect's Twisted's logs to
Python's standard library logging module.
This must be called at least once before the library can be used, and
can be called multiple times.
"""
if self._started:
return
self._common_setup()
if platform.type == "posix":
self._reactor.callFromThread(self._startReapingProcesses)
if self._startLoggingWithObserver:
observer = ThreadLogObserver(PythonLoggingObserver().emit)
def start():
# Twisted is going to override warnings.showwarning; let's
# make sure that has no effect:
from twisted.python import log
original = log.showwarning
log.showwarning = warnings.showwarning
self._startLoggingWithObserver(observer, False)
log.showwarning = original
self._reactor.callFromThread(start)
# We only want to stop the logging thread once the reactor has
# shut down:
self._reactor.addSystemEventTrigger(
"after", "shutdown", observer.stop)
t = threading.Thread(
target=lambda: self._reactor.run(installSignalHandlers=False),
name="CrochetReactor")
t.start()
self._atexit_register(self._reactor.callFromThread, self._reactor.stop)
self._atexit_register(_store.log_errors)
if self._watchdog_thread is not None:
self._watchdog_thread.start()
@synchronized
def no_setup(self):
"""
Initialize the crochet library with no side effects.
No reactor will be started, logging is uneffected, etc.. Future calls
to setup() will have no effect. This is useful for applications that
intend to run Twisted's reactor themselves, and so do not want
libraries using crochet to attempt to start it on their own.
If no_setup() is called after setup(), a RuntimeError is raised.
"""
if self._started:
raise RuntimeError(
"no_setup() is intended to be called once, by a"
" Twisted application, before any libraries "
"using crochet are imported and call setup().")
self._common_setup()
@wrapt.decorator
def run_in_reactor(self, function):
"""
A decorator that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, an EventualResult is returned.
"""
result = self._run_in_reactor(function)
# Backwards compatibility; use __wrapped__ instead.
try:
result.wrapped_function = function
except AttributeError:
pass
return result
def wait_for_reactor(self, function):
"""
DEPRECATED, use wait_for(timeout) instead.
A decorator that ensures the wrapped function runs in the reactor
thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently.
"""
warnings.warn(
"@wait_for_reactor is deprecated, use @wait_for instead",
DeprecationWarning,
stacklevel=2)
# This will timeout, in theory. In practice the process will be dead
# long before that.
return self.wait_for(2**31)(function)
def wait_for(self, timeout):
"""
A decorator factory that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently. Calls will
timeout after the given number of seconds (a float), raising a
crochet.TimeoutError, and cancelling the Deferred being waited on.
"""
def decorator(function):
@wrapt.decorator
def wrapper(function, _, args, kwargs):
@self.run_in_reactor
def run():
return function(*args, **kwargs)
eventual_result = run()
try:
return eventual_result.wait(timeout)
except TimeoutError:
eventual_result.cancel()
raise
result = wrapper(function)
# Expose underling function for testing purposes; this attribute is
# deprecated, use __wrapped__ instead:
try:
result.wrapped_function = function
except AttributeError:
pass
return result
return decorator
def in_reactor(self, function):
"""
DEPRECATED, use run_in_reactor.
A decorator that ensures the wrapped function runs in the reactor
thread.
The wrapped function will get the reactor passed in as a first
argument, in addition to any arguments it is called with.
When the wrapped function is called, an EventualResult is returned.
"""
warnings.warn(
"@in_reactor is deprecated, use @run_in_reactor",
DeprecationWarning,
stacklevel=2)
@self.run_in_reactor
@wraps(function)
def add_reactor(*args, **kwargs):
return function(self._reactor, *args, **kwargs)
return add_reactor
|
itamarst/crochet | crochet/_eventloop.py | EventLoop.run_in_reactor | python | def run_in_reactor(self, function):
result = self._run_in_reactor(function)
# Backwards compatibility; use __wrapped__ instead.
try:
result.wrapped_function = function
except AttributeError:
pass
return result | A decorator that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, an EventualResult is returned. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L457-L470 | null | class EventLoop(object):
"""
Initialization infrastructure for running a reactor in a thread.
"""
def __init__(
self,
reactorFactory,
atexit_register,
startLoggingWithObserver=None,
watchdog_thread=None,
reapAllProcesses=None
):
"""
reactorFactory: Zero-argument callable that returns a reactor.
atexit_register: atexit.register, or look-alike.
startLoggingWithObserver: Either None, or
twisted.python.log.startLoggingWithObserver or lookalike.
watchdog_thread: crochet._shutdown.Watchdog instance, or None.
reapAllProcesses: twisted.internet.process.reapAllProcesses or
lookalike.
"""
self._reactorFactory = reactorFactory
self._atexit_register = atexit_register
self._startLoggingWithObserver = startLoggingWithObserver
self._started = False
self._lock = threading.Lock()
self._watchdog_thread = watchdog_thread
self._reapAllProcesses = reapAllProcesses
def _startReapingProcesses(self):
"""
Start a LoopingCall that calls reapAllProcesses.
"""
lc = LoopingCall(self._reapAllProcesses)
lc.clock = self._reactor
lc.start(0.1, False)
def _common_setup(self):
"""
The minimal amount of setup done by both setup() and no_setup().
"""
self._started = True
self._reactor = self._reactorFactory()
self._registry = ResultRegistry()
# We want to unblock EventualResult regardless of how the reactor is
# run, so we always register this:
self._reactor.addSystemEventTrigger(
"before", "shutdown", self._registry.stop)
@synchronized
def setup(self):
"""
Initialize the crochet library.
This starts the reactor in a thread, and connect's Twisted's logs to
Python's standard library logging module.
This must be called at least once before the library can be used, and
can be called multiple times.
"""
if self._started:
return
self._common_setup()
if platform.type == "posix":
self._reactor.callFromThread(self._startReapingProcesses)
if self._startLoggingWithObserver:
observer = ThreadLogObserver(PythonLoggingObserver().emit)
def start():
# Twisted is going to override warnings.showwarning; let's
# make sure that has no effect:
from twisted.python import log
original = log.showwarning
log.showwarning = warnings.showwarning
self._startLoggingWithObserver(observer, False)
log.showwarning = original
self._reactor.callFromThread(start)
# We only want to stop the logging thread once the reactor has
# shut down:
self._reactor.addSystemEventTrigger(
"after", "shutdown", observer.stop)
t = threading.Thread(
target=lambda: self._reactor.run(installSignalHandlers=False),
name="CrochetReactor")
t.start()
self._atexit_register(self._reactor.callFromThread, self._reactor.stop)
self._atexit_register(_store.log_errors)
if self._watchdog_thread is not None:
self._watchdog_thread.start()
@synchronized
def no_setup(self):
"""
Initialize the crochet library with no side effects.
No reactor will be started, logging is uneffected, etc.. Future calls
to setup() will have no effect. This is useful for applications that
intend to run Twisted's reactor themselves, and so do not want
libraries using crochet to attempt to start it on their own.
If no_setup() is called after setup(), a RuntimeError is raised.
"""
if self._started:
raise RuntimeError(
"no_setup() is intended to be called once, by a"
" Twisted application, before any libraries "
"using crochet are imported and call setup().")
self._common_setup()
@wrapt.decorator
def _run_in_reactor(self, function, _, args, kwargs):
"""
Implementation: A decorator that ensures the wrapped function runs in
the reactor thread.
When the wrapped function is called, an EventualResult is returned.
"""
def runs_in_reactor(result, args, kwargs):
d = maybeDeferred(function, *args, **kwargs)
result._connect_deferred(d)
result = EventualResult(None, self._reactor)
self._registry.register(result)
self._reactor.callFromThread(runs_in_reactor, result, args, kwargs)
return result
def wait_for_reactor(self, function):
"""
DEPRECATED, use wait_for(timeout) instead.
A decorator that ensures the wrapped function runs in the reactor
thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently.
"""
warnings.warn(
"@wait_for_reactor is deprecated, use @wait_for instead",
DeprecationWarning,
stacklevel=2)
# This will timeout, in theory. In practice the process will be dead
# long before that.
return self.wait_for(2**31)(function)
def wait_for(self, timeout):
"""
A decorator factory that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently. Calls will
timeout after the given number of seconds (a float), raising a
crochet.TimeoutError, and cancelling the Deferred being waited on.
"""
def decorator(function):
@wrapt.decorator
def wrapper(function, _, args, kwargs):
@self.run_in_reactor
def run():
return function(*args, **kwargs)
eventual_result = run()
try:
return eventual_result.wait(timeout)
except TimeoutError:
eventual_result.cancel()
raise
result = wrapper(function)
# Expose underling function for testing purposes; this attribute is
# deprecated, use __wrapped__ instead:
try:
result.wrapped_function = function
except AttributeError:
pass
return result
return decorator
def in_reactor(self, function):
"""
DEPRECATED, use run_in_reactor.
A decorator that ensures the wrapped function runs in the reactor
thread.
The wrapped function will get the reactor passed in as a first
argument, in addition to any arguments it is called with.
When the wrapped function is called, an EventualResult is returned.
"""
warnings.warn(
"@in_reactor is deprecated, use @run_in_reactor",
DeprecationWarning,
stacklevel=2)
@self.run_in_reactor
@wraps(function)
def add_reactor(*args, **kwargs):
return function(self._reactor, *args, **kwargs)
return add_reactor
|
itamarst/crochet | crochet/_eventloop.py | EventLoop.wait_for_reactor | python | def wait_for_reactor(self, function):
warnings.warn(
"@wait_for_reactor is deprecated, use @wait_for instead",
DeprecationWarning,
stacklevel=2)
# This will timeout, in theory. In practice the process will be dead
# long before that.
return self.wait_for(2**31)(function) | DEPRECATED, use wait_for(timeout) instead.
A decorator that ensures the wrapped function runs in the reactor
thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L472-L488 | [
"def wait_for(self, timeout):\n \"\"\"\n A decorator factory that ensures the wrapped function runs in the\n reactor thread.\n\n When the wrapped function is called, its result is returned or its\n exception raised. Deferreds are handled transparently. Calls will\n timeout after the given number o... | class EventLoop(object):
"""
Initialization infrastructure for running a reactor in a thread.
"""
def __init__(
self,
reactorFactory,
atexit_register,
startLoggingWithObserver=None,
watchdog_thread=None,
reapAllProcesses=None
):
"""
reactorFactory: Zero-argument callable that returns a reactor.
atexit_register: atexit.register, or look-alike.
startLoggingWithObserver: Either None, or
twisted.python.log.startLoggingWithObserver or lookalike.
watchdog_thread: crochet._shutdown.Watchdog instance, or None.
reapAllProcesses: twisted.internet.process.reapAllProcesses or
lookalike.
"""
self._reactorFactory = reactorFactory
self._atexit_register = atexit_register
self._startLoggingWithObserver = startLoggingWithObserver
self._started = False
self._lock = threading.Lock()
self._watchdog_thread = watchdog_thread
self._reapAllProcesses = reapAllProcesses
def _startReapingProcesses(self):
"""
Start a LoopingCall that calls reapAllProcesses.
"""
lc = LoopingCall(self._reapAllProcesses)
lc.clock = self._reactor
lc.start(0.1, False)
def _common_setup(self):
"""
The minimal amount of setup done by both setup() and no_setup().
"""
self._started = True
self._reactor = self._reactorFactory()
self._registry = ResultRegistry()
# We want to unblock EventualResult regardless of how the reactor is
# run, so we always register this:
self._reactor.addSystemEventTrigger(
"before", "shutdown", self._registry.stop)
@synchronized
def setup(self):
"""
Initialize the crochet library.
This starts the reactor in a thread, and connect's Twisted's logs to
Python's standard library logging module.
This must be called at least once before the library can be used, and
can be called multiple times.
"""
if self._started:
return
self._common_setup()
if platform.type == "posix":
self._reactor.callFromThread(self._startReapingProcesses)
if self._startLoggingWithObserver:
observer = ThreadLogObserver(PythonLoggingObserver().emit)
def start():
# Twisted is going to override warnings.showwarning; let's
# make sure that has no effect:
from twisted.python import log
original = log.showwarning
log.showwarning = warnings.showwarning
self._startLoggingWithObserver(observer, False)
log.showwarning = original
self._reactor.callFromThread(start)
# We only want to stop the logging thread once the reactor has
# shut down:
self._reactor.addSystemEventTrigger(
"after", "shutdown", observer.stop)
t = threading.Thread(
target=lambda: self._reactor.run(installSignalHandlers=False),
name="CrochetReactor")
t.start()
self._atexit_register(self._reactor.callFromThread, self._reactor.stop)
self._atexit_register(_store.log_errors)
if self._watchdog_thread is not None:
self._watchdog_thread.start()
@synchronized
def no_setup(self):
"""
Initialize the crochet library with no side effects.
No reactor will be started, logging is uneffected, etc.. Future calls
to setup() will have no effect. This is useful for applications that
intend to run Twisted's reactor themselves, and so do not want
libraries using crochet to attempt to start it on their own.
If no_setup() is called after setup(), a RuntimeError is raised.
"""
if self._started:
raise RuntimeError(
"no_setup() is intended to be called once, by a"
" Twisted application, before any libraries "
"using crochet are imported and call setup().")
self._common_setup()
@wrapt.decorator
def _run_in_reactor(self, function, _, args, kwargs):
"""
Implementation: A decorator that ensures the wrapped function runs in
the reactor thread.
When the wrapped function is called, an EventualResult is returned.
"""
def runs_in_reactor(result, args, kwargs):
d = maybeDeferred(function, *args, **kwargs)
result._connect_deferred(d)
result = EventualResult(None, self._reactor)
self._registry.register(result)
self._reactor.callFromThread(runs_in_reactor, result, args, kwargs)
return result
def run_in_reactor(self, function):
"""
A decorator that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, an EventualResult is returned.
"""
result = self._run_in_reactor(function)
# Backwards compatibility; use __wrapped__ instead.
try:
result.wrapped_function = function
except AttributeError:
pass
return result
def wait_for(self, timeout):
"""
A decorator factory that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently. Calls will
timeout after the given number of seconds (a float), raising a
crochet.TimeoutError, and cancelling the Deferred being waited on.
"""
def decorator(function):
@wrapt.decorator
def wrapper(function, _, args, kwargs):
@self.run_in_reactor
def run():
return function(*args, **kwargs)
eventual_result = run()
try:
return eventual_result.wait(timeout)
except TimeoutError:
eventual_result.cancel()
raise
result = wrapper(function)
# Expose underling function for testing purposes; this attribute is
# deprecated, use __wrapped__ instead:
try:
result.wrapped_function = function
except AttributeError:
pass
return result
return decorator
def in_reactor(self, function):
"""
DEPRECATED, use run_in_reactor.
A decorator that ensures the wrapped function runs in the reactor
thread.
The wrapped function will get the reactor passed in as a first
argument, in addition to any arguments it is called with.
When the wrapped function is called, an EventualResult is returned.
"""
warnings.warn(
"@in_reactor is deprecated, use @run_in_reactor",
DeprecationWarning,
stacklevel=2)
@self.run_in_reactor
@wraps(function)
def add_reactor(*args, **kwargs):
return function(self._reactor, *args, **kwargs)
return add_reactor
|
itamarst/crochet | crochet/_eventloop.py | EventLoop.wait_for | python | def wait_for(self, timeout):
def decorator(function):
@wrapt.decorator
def wrapper(function, _, args, kwargs):
@self.run_in_reactor
def run():
return function(*args, **kwargs)
eventual_result = run()
try:
return eventual_result.wait(timeout)
except TimeoutError:
eventual_result.cancel()
raise
result = wrapper(function)
# Expose underling function for testing purposes; this attribute is
# deprecated, use __wrapped__ instead:
try:
result.wrapped_function = function
except AttributeError:
pass
return result
return decorator | A decorator factory that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently. Calls will
timeout after the given number of seconds (a float), raising a
crochet.TimeoutError, and cancelling the Deferred being waited on. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L490-L524 | null | class EventLoop(object):
"""
Initialization infrastructure for running a reactor in a thread.
"""
def __init__(
self,
reactorFactory,
atexit_register,
startLoggingWithObserver=None,
watchdog_thread=None,
reapAllProcesses=None
):
"""
reactorFactory: Zero-argument callable that returns a reactor.
atexit_register: atexit.register, or look-alike.
startLoggingWithObserver: Either None, or
twisted.python.log.startLoggingWithObserver or lookalike.
watchdog_thread: crochet._shutdown.Watchdog instance, or None.
reapAllProcesses: twisted.internet.process.reapAllProcesses or
lookalike.
"""
self._reactorFactory = reactorFactory
self._atexit_register = atexit_register
self._startLoggingWithObserver = startLoggingWithObserver
self._started = False
self._lock = threading.Lock()
self._watchdog_thread = watchdog_thread
self._reapAllProcesses = reapAllProcesses
def _startReapingProcesses(self):
"""
Start a LoopingCall that calls reapAllProcesses.
"""
lc = LoopingCall(self._reapAllProcesses)
lc.clock = self._reactor
lc.start(0.1, False)
def _common_setup(self):
"""
The minimal amount of setup done by both setup() and no_setup().
"""
self._started = True
self._reactor = self._reactorFactory()
self._registry = ResultRegistry()
# We want to unblock EventualResult regardless of how the reactor is
# run, so we always register this:
self._reactor.addSystemEventTrigger(
"before", "shutdown", self._registry.stop)
@synchronized
def setup(self):
"""
Initialize the crochet library.
This starts the reactor in a thread, and connect's Twisted's logs to
Python's standard library logging module.
This must be called at least once before the library can be used, and
can be called multiple times.
"""
if self._started:
return
self._common_setup()
if platform.type == "posix":
self._reactor.callFromThread(self._startReapingProcesses)
if self._startLoggingWithObserver:
observer = ThreadLogObserver(PythonLoggingObserver().emit)
def start():
# Twisted is going to override warnings.showwarning; let's
# make sure that has no effect:
from twisted.python import log
original = log.showwarning
log.showwarning = warnings.showwarning
self._startLoggingWithObserver(observer, False)
log.showwarning = original
self._reactor.callFromThread(start)
# We only want to stop the logging thread once the reactor has
# shut down:
self._reactor.addSystemEventTrigger(
"after", "shutdown", observer.stop)
t = threading.Thread(
target=lambda: self._reactor.run(installSignalHandlers=False),
name="CrochetReactor")
t.start()
self._atexit_register(self._reactor.callFromThread, self._reactor.stop)
self._atexit_register(_store.log_errors)
if self._watchdog_thread is not None:
self._watchdog_thread.start()
@synchronized
def no_setup(self):
"""
Initialize the crochet library with no side effects.
No reactor will be started, logging is uneffected, etc.. Future calls
to setup() will have no effect. This is useful for applications that
intend to run Twisted's reactor themselves, and so do not want
libraries using crochet to attempt to start it on their own.
If no_setup() is called after setup(), a RuntimeError is raised.
"""
if self._started:
raise RuntimeError(
"no_setup() is intended to be called once, by a"
" Twisted application, before any libraries "
"using crochet are imported and call setup().")
self._common_setup()
@wrapt.decorator
def _run_in_reactor(self, function, _, args, kwargs):
"""
Implementation: A decorator that ensures the wrapped function runs in
the reactor thread.
When the wrapped function is called, an EventualResult is returned.
"""
def runs_in_reactor(result, args, kwargs):
d = maybeDeferred(function, *args, **kwargs)
result._connect_deferred(d)
result = EventualResult(None, self._reactor)
self._registry.register(result)
self._reactor.callFromThread(runs_in_reactor, result, args, kwargs)
return result
def run_in_reactor(self, function):
"""
A decorator that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, an EventualResult is returned.
"""
result = self._run_in_reactor(function)
# Backwards compatibility; use __wrapped__ instead.
try:
result.wrapped_function = function
except AttributeError:
pass
return result
def wait_for_reactor(self, function):
"""
DEPRECATED, use wait_for(timeout) instead.
A decorator that ensures the wrapped function runs in the reactor
thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently.
"""
warnings.warn(
"@wait_for_reactor is deprecated, use @wait_for instead",
DeprecationWarning,
stacklevel=2)
# This will timeout, in theory. In practice the process will be dead
# long before that.
return self.wait_for(2**31)(function)
def in_reactor(self, function):
"""
DEPRECATED, use run_in_reactor.
A decorator that ensures the wrapped function runs in the reactor
thread.
The wrapped function will get the reactor passed in as a first
argument, in addition to any arguments it is called with.
When the wrapped function is called, an EventualResult is returned.
"""
warnings.warn(
"@in_reactor is deprecated, use @run_in_reactor",
DeprecationWarning,
stacklevel=2)
@self.run_in_reactor
@wraps(function)
def add_reactor(*args, **kwargs):
return function(self._reactor, *args, **kwargs)
return add_reactor
|
itamarst/crochet | crochet/_eventloop.py | EventLoop.in_reactor | python | def in_reactor(self, function):
warnings.warn(
"@in_reactor is deprecated, use @run_in_reactor",
DeprecationWarning,
stacklevel=2)
@self.run_in_reactor
@wraps(function)
def add_reactor(*args, **kwargs):
return function(self._reactor, *args, **kwargs)
return add_reactor | DEPRECATED, use run_in_reactor.
A decorator that ensures the wrapped function runs in the reactor
thread.
The wrapped function will get the reactor passed in as a first
argument, in addition to any arguments it is called with.
When the wrapped function is called, an EventualResult is returned. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L526-L548 | [
"def run_in_reactor(self, function):\n \"\"\"\n A decorator that ensures the wrapped function runs in the\n reactor thread.\n\n When the wrapped function is called, an EventualResult is returned.\n \"\"\"\n result = self._run_in_reactor(function)\n # Backwards compatibility; use __wrapped__ ins... | class EventLoop(object):
"""
Initialization infrastructure for running a reactor in a thread.
"""
def __init__(
self,
reactorFactory,
atexit_register,
startLoggingWithObserver=None,
watchdog_thread=None,
reapAllProcesses=None
):
"""
reactorFactory: Zero-argument callable that returns a reactor.
atexit_register: atexit.register, or look-alike.
startLoggingWithObserver: Either None, or
twisted.python.log.startLoggingWithObserver or lookalike.
watchdog_thread: crochet._shutdown.Watchdog instance, or None.
reapAllProcesses: twisted.internet.process.reapAllProcesses or
lookalike.
"""
self._reactorFactory = reactorFactory
self._atexit_register = atexit_register
self._startLoggingWithObserver = startLoggingWithObserver
self._started = False
self._lock = threading.Lock()
self._watchdog_thread = watchdog_thread
self._reapAllProcesses = reapAllProcesses
def _startReapingProcesses(self):
"""
Start a LoopingCall that calls reapAllProcesses.
"""
lc = LoopingCall(self._reapAllProcesses)
lc.clock = self._reactor
lc.start(0.1, False)
def _common_setup(self):
"""
The minimal amount of setup done by both setup() and no_setup().
"""
self._started = True
self._reactor = self._reactorFactory()
self._registry = ResultRegistry()
# We want to unblock EventualResult regardless of how the reactor is
# run, so we always register this:
self._reactor.addSystemEventTrigger(
"before", "shutdown", self._registry.stop)
@synchronized
def setup(self):
"""
Initialize the crochet library.
This starts the reactor in a thread, and connect's Twisted's logs to
Python's standard library logging module.
This must be called at least once before the library can be used, and
can be called multiple times.
"""
if self._started:
return
self._common_setup()
if platform.type == "posix":
self._reactor.callFromThread(self._startReapingProcesses)
if self._startLoggingWithObserver:
observer = ThreadLogObserver(PythonLoggingObserver().emit)
def start():
# Twisted is going to override warnings.showwarning; let's
# make sure that has no effect:
from twisted.python import log
original = log.showwarning
log.showwarning = warnings.showwarning
self._startLoggingWithObserver(observer, False)
log.showwarning = original
self._reactor.callFromThread(start)
# We only want to stop the logging thread once the reactor has
# shut down:
self._reactor.addSystemEventTrigger(
"after", "shutdown", observer.stop)
t = threading.Thread(
target=lambda: self._reactor.run(installSignalHandlers=False),
name="CrochetReactor")
t.start()
self._atexit_register(self._reactor.callFromThread, self._reactor.stop)
self._atexit_register(_store.log_errors)
if self._watchdog_thread is not None:
self._watchdog_thread.start()
@synchronized
def no_setup(self):
"""
Initialize the crochet library with no side effects.
No reactor will be started, logging is uneffected, etc.. Future calls
to setup() will have no effect. This is useful for applications that
intend to run Twisted's reactor themselves, and so do not want
libraries using crochet to attempt to start it on their own.
If no_setup() is called after setup(), a RuntimeError is raised.
"""
if self._started:
raise RuntimeError(
"no_setup() is intended to be called once, by a"
" Twisted application, before any libraries "
"using crochet are imported and call setup().")
self._common_setup()
@wrapt.decorator
def _run_in_reactor(self, function, _, args, kwargs):
"""
Implementation: A decorator that ensures the wrapped function runs in
the reactor thread.
When the wrapped function is called, an EventualResult is returned.
"""
def runs_in_reactor(result, args, kwargs):
d = maybeDeferred(function, *args, **kwargs)
result._connect_deferred(d)
result = EventualResult(None, self._reactor)
self._registry.register(result)
self._reactor.callFromThread(runs_in_reactor, result, args, kwargs)
return result
def run_in_reactor(self, function):
"""
A decorator that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, an EventualResult is returned.
"""
result = self._run_in_reactor(function)
# Backwards compatibility; use __wrapped__ instead.
try:
result.wrapped_function = function
except AttributeError:
pass
return result
def wait_for_reactor(self, function):
"""
DEPRECATED, use wait_for(timeout) instead.
A decorator that ensures the wrapped function runs in the reactor
thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently.
"""
warnings.warn(
"@wait_for_reactor is deprecated, use @wait_for instead",
DeprecationWarning,
stacklevel=2)
# This will timeout, in theory. In practice the process will be dead
# long before that.
return self.wait_for(2**31)(function)
def wait_for(self, timeout):
"""
A decorator factory that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently. Calls will
timeout after the given number of seconds (a float), raising a
crochet.TimeoutError, and cancelling the Deferred being waited on.
"""
def decorator(function):
@wrapt.decorator
def wrapper(function, _, args, kwargs):
@self.run_in_reactor
def run():
return function(*args, **kwargs)
eventual_result = run()
try:
return eventual_result.wait(timeout)
except TimeoutError:
eventual_result.cancel()
raise
result = wrapper(function)
# Expose underling function for testing purposes; this attribute is
# deprecated, use __wrapped__ instead:
try:
result.wrapped_function = function
except AttributeError:
pass
return result
return decorator
|
itamarst/crochet | examples/mxquery.py | _mx | python | def _mx(domain):
def got_records(result):
return sorted(
[(int(record.payload.preference), str(record.payload.name))
for record in result[0]])
d = lookupMailExchange(domain)
d.addCallback(got_records)
return d | Return Deferred that fires with a list of (priority, MX domain) tuples for
a given domain. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/examples/mxquery.py#L14-L25 | null | #!/usr/bin/python
"""
A command-line application that uses Twisted to do an MX DNS query.
"""
from __future__ import print_function
from twisted.names.client import lookupMailExchange
from crochet import setup, wait_for
setup()
# Twisted code:
# Blocking wrapper:
@wait_for(timeout=5)
def mx(domain):
"""
Return list of (priority, MX domain) tuples for a given domain.
"""
return _mx(domain)
# Application code:
def main(domain):
print("Mail servers for %s:" % (domain,))
for priority, mailserver in mx(domain):
print(priority, mailserver)
if __name__ == '__main__':
import sys
main(sys.argv[1])
|
itamarst/crochet | crochet/_resultstore.py | ResultStore.store | python | def store(self, deferred_result):
self._counter += 1
self._stored[self._counter] = deferred_result
return self._counter | Store a EventualResult.
Return an integer, a unique identifier that can be used to retrieve
the object. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_resultstore.py#L30-L39 | null | class ResultStore(object):
"""
An in-memory store for EventualResult instances.
Each EventualResult put in the store gets a unique identifier, which can
be used to retrieve it later. This is useful for referring to results in
e.g. web sessions.
EventualResults that are not retrieved by shutdown will be logged if they
have an error result.
"""
def __init__(self):
self._counter = 0
self._stored = {}
self._lock = threading.Lock()
@synchronized
@synchronized
def retrieve(self, result_id):
"""
Return the given EventualResult, and remove it from the store.
"""
return self._stored.pop(result_id)
@synchronized
def log_errors(self):
"""
Log errors for all stored EventualResults that have error results.
"""
for result in self._stored.values():
failure = result.original_failure()
if failure is not None:
log.err(failure, "Unhandled error in stashed EventualResult:")
|
itamarst/crochet | crochet/_resultstore.py | ResultStore.log_errors | python | def log_errors(self):
for result in self._stored.values():
failure = result.original_failure()
if failure is not None:
log.err(failure, "Unhandled error in stashed EventualResult:") | Log errors for all stored EventualResults that have error results. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_resultstore.py#L49-L56 | null | class ResultStore(object):
"""
An in-memory store for EventualResult instances.
Each EventualResult put in the store gets a unique identifier, which can
be used to retrieve it later. This is useful for referring to results in
e.g. web sessions.
EventualResults that are not retrieved by shutdown will be logged if they
have an error result.
"""
def __init__(self):
self._counter = 0
self._stored = {}
self._lock = threading.Lock()
@synchronized
def store(self, deferred_result):
"""
Store a EventualResult.
Return an integer, a unique identifier that can be used to retrieve
the object.
"""
self._counter += 1
self._stored[self._counter] = deferred_result
return self._counter
@synchronized
def retrieve(self, result_id):
"""
Return the given EventualResult, and remove it from the store.
"""
return self._stored.pop(result_id)
@synchronized
|
itamarst/crochet | examples/ssh.py | start_ssh_server | python | def start_ssh_server(port, username, password, namespace):
# This is a lot of boilerplate, see http://tm.tl/6429 for a ticket to
# provide a utility function that simplifies this.
from twisted.internet import reactor
from twisted.conch.insults import insults
from twisted.conch import manhole, manhole_ssh
from twisted.cred.checkers import (
InMemoryUsernamePasswordDatabaseDontUse as MemoryDB)
from twisted.cred.portal import Portal
sshRealm = manhole_ssh.TerminalRealm()
def chainedProtocolFactory():
return insults.ServerProtocol(manhole.Manhole, namespace)
sshRealm.chainedProtocolFactory = chainedProtocolFactory
sshPortal = Portal(sshRealm, [MemoryDB(**{username: password})])
reactor.listenTCP(port, manhole_ssh.ConchFactory(sshPortal),
interface="127.0.0.1") | Start an SSH server on the given port, exposing a Python prompt with the
given namespace. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/examples/ssh.py#L40-L61 | null | #!/usr/bin/python
"""
A demonstration of Conch, allowing you to SSH into a running Python server and
inspect objects at a Python prompt.
If you're using the system install of Twisted, you may need to install Conch
separately, e.g. on Ubuntu:
$ sudo apt-get install python-twisted-conch
Once you've started the program, you can ssh in by doing:
$ ssh admin@localhost -p 5022
The password is 'secret'. Once you've reached the Python prompt, you have
access to the app object, and can import code, etc.:
>>> 3 + 4
7
>>> print(app)
<flask.app.Flask object at 0x18e1690>
"""
import logging
from flask import Flask
from crochet import setup, run_in_reactor
setup()
# Web server:
app = Flask(__name__)
@app.route('/')
def index():
return "Welcome to my boring web server!"
@run_in_reactor
if __name__ == '__main__':
import sys
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
start_ssh_server(5022, "admin", "secret", {"app": app})
app.run()
|
itamarst/crochet | crochet/_util.py | _synced | python | def _synced(method, self, args, kwargs):
with self._lock:
return method(*args, **kwargs) | Underlying synchronized wrapper. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_util.py#L9-L12 | null | """
Utility functions and classes.
"""
import wrapt
@wrapt.decorator
def synchronized(method):
"""
Decorator that wraps a method with an acquire/release of self._lock.
"""
result = _synced(method)
result.synchronized = True
return result
|
itamarst/crochet | crochet/_shutdown.py | FunctionRegistry.register | python | def register(self, f, *args, **kwargs):
self._functions.append(lambda: f(*args, **kwargs)) | Register a function and arguments to be called later. | train | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_shutdown.py#L40-L44 | null | class FunctionRegistry(object):
"""
A registry of functions that can be called all at once.
"""
def __init__(self):
self._functions = []
def run(self):
"""
Run all registered functions in reverse order of registration.
"""
for f in reversed(self._functions):
try:
f()
except:
log.err()
|
ASMfreaK/habitipy | habitipy/util.py | progressed_bar | python | def progressed_bar(count, total=100, status=None, suffix=None, bar_len=10):
status = status or ''
suffix = suffix or '%'
assert isinstance(count, int)
count_normalized = count if count <= total else total
filled_len = int(round(bar_len * count_normalized / float(total)))
percents = 100.0 * count / float(total)
color = '#5cb85c'
if percents < 30.0:
color = '#d9534f'
if percents < 70.0:
color = '#f0ad4e'
text_color = colors.fg(color)
bar_color = text_color + colors.bg(color)
nc_color = colors.dark_gray
progressbar = (colors.bg('#428bca') | status) if status else ''
progressbar += (bar_color | ('█' * filled_len))
progressbar += (nc_color | ('█' * (bar_len - filled_len)))
progressbar += (text_color | (str(count) + suffix))
return progressbar | render a progressed.io like progress bar | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/util.py#L22-L42 | null | """
habitipy - tools and library for Habitica restful API
utility functions
"""
# pylint: disable=invalid-name,bad-whitespace
import os
import gettext
from contextlib import contextmanager
from functools import partial
from textwrap import dedent
import re
from typing import Tuple
# import logging
import pkg_resources
from plumbum import colors
try:
from emoji import emojize
except ImportError:
emojize = None # type: ignore
_progressed_regex_str = r"""
!
\[[^]]*\]
\(\s*http://progressed\.io/bar/(?P<progress>[0-9]{1,3})
(
\?((
(title=(?P<title>[^&) "]*))|
(scale=(?P<scale>[^&) "]*))|
(suffix=(?P<suffix>[^&) "]*))
)&*)*
){0,1}
\s*("[^"]*")*\)
"""
_progressed_regex = re.compile(_progressed_regex_str, re.VERBOSE)
def _progressed_match(m, bar_len=10):
progress = m['progress']
scale = m['scale']
progress = int(progress) if progress is not None else 0
scale = int(scale) if scale is not None else 100
return progressed_bar(
progress, total=scale,
status=m['title'], suffix=m['suffix'],
bar_len=bar_len)
def progressed(string):
"""
helper function to replace all links to progressed.io with progress bars
# Example
```python
from habitipy.util import progressed
text_from_habitica = 'Write thesis '
print(progressed(text_from_habitica))
```
```
Write thesis ██████████0%
```
"""
return _progressed_regex.sub(_progressed_match, string)
def prettify(string):
"""
replace markup emoji and progressbars with actual things
# Example
```python
from habitipy.util import prettify
print(prettify('Write thesis :book: '))
```
```
Write thesis 📖 ██████████0%
```
"""
string = emojize(string, use_aliases=True) if emojize else string
string = progressed(string)
return string
@contextmanager
def umask(mask):
"""
temporarily change umask
# Arguments
mask : a umask (invese of chmod argument)
# Example
```python
with umask(0o077), open('yay.txt') as f:
f.write('nyaroo~n')
```
`yay.txt` will be written with 600 file mode
"""
prev = os.umask(mask)
try:
yield
finally:
os.umask(prev)
secure_filestore = partial(umask, 0o077)
def is_secure_file(fn):
"""checks if a file can be accessed only by the owner"""
st = os.stat(fn)
return (st.st_mode & 0o777) == 0o600
class SecurityError(ValueError):
"""Error fired when a secure file is stored in an insecure manner"""
def assert_secure_file(file):
"""checks if a file is stored securely"""
if not is_secure_file(file):
msg = """
File {0} can be read by other users.
This is not secure. Please run 'chmod 600 "{0}"'"""
raise SecurityError(dedent(msg).replace('\n', ' ').format(file))
return True
def get_translation_for(package_name: str) -> gettext.NullTranslations:
"""find and return gettext translation for package"""
localedir = None
for localedir in pkg_resources.resource_filename(package_name, 'i18n'), None:
localefile = gettext.find(package_name, localedir) # type: ignore
if localefile:
break
else:
pass
return gettext.translation(package_name, localedir=localedir, fallback=True) # type: ignore
def get_translation_functions(package_name: str, names: Tuple[str, ...] = ('gettext',)):
"""finds and installs translation functions for package"""
translation = get_translation_for(package_name)
return [getattr(translation, x) for x in names]
|
ASMfreaK/habitipy | habitipy/util.py | prettify | python | def prettify(string):
string = emojize(string, use_aliases=True) if emojize else string
string = progressed(string)
return string | replace markup emoji and progressbars with actual things
# Example
```python
from habitipy.util import prettify
print(prettify('Write thesis :book: '))
```
```
Write thesis 📖 ██████████0%
``` | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/util.py#L89-L104 | [
"def progressed(string):\n \"\"\"\n helper function to replace all links to progressed.io with progress bars\n\n # Example\n ```python\n from habitipy.util import progressed\n text_from_habitica = 'Write thesis '\n print(progressed(text_from_h... | """
habitipy - tools and library for Habitica restful API
utility functions
"""
# pylint: disable=invalid-name,bad-whitespace
import os
import gettext
from contextlib import contextmanager
from functools import partial
from textwrap import dedent
import re
from typing import Tuple
# import logging
import pkg_resources
from plumbum import colors
try:
from emoji import emojize
except ImportError:
emojize = None # type: ignore
def progressed_bar(count, total=100, status=None, suffix=None, bar_len=10):
"""render a progressed.io like progress bar"""
status = status or ''
suffix = suffix or '%'
assert isinstance(count, int)
count_normalized = count if count <= total else total
filled_len = int(round(bar_len * count_normalized / float(total)))
percents = 100.0 * count / float(total)
color = '#5cb85c'
if percents < 30.0:
color = '#d9534f'
if percents < 70.0:
color = '#f0ad4e'
text_color = colors.fg(color)
bar_color = text_color + colors.bg(color)
nc_color = colors.dark_gray
progressbar = (colors.bg('#428bca') | status) if status else ''
progressbar += (bar_color | ('█' * filled_len))
progressbar += (nc_color | ('█' * (bar_len - filled_len)))
progressbar += (text_color | (str(count) + suffix))
return progressbar
_progressed_regex_str = r"""
!
\[[^]]*\]
\(\s*http://progressed\.io/bar/(?P<progress>[0-9]{1,3})
(
\?((
(title=(?P<title>[^&) "]*))|
(scale=(?P<scale>[^&) "]*))|
(suffix=(?P<suffix>[^&) "]*))
)&*)*
){0,1}
\s*("[^"]*")*\)
"""
_progressed_regex = re.compile(_progressed_regex_str, re.VERBOSE)
def _progressed_match(m, bar_len=10):
progress = m['progress']
scale = m['scale']
progress = int(progress) if progress is not None else 0
scale = int(scale) if scale is not None else 100
return progressed_bar(
progress, total=scale,
status=m['title'], suffix=m['suffix'],
bar_len=bar_len)
def progressed(string):
"""
helper function to replace all links to progressed.io with progress bars
# Example
```python
from habitipy.util import progressed
text_from_habitica = 'Write thesis '
print(progressed(text_from_habitica))
```
```
Write thesis ██████████0%
```
"""
return _progressed_regex.sub(_progressed_match, string)
@contextmanager
def umask(mask):
"""
temporarily change umask
# Arguments
mask : a umask (invese of chmod argument)
# Example
```python
with umask(0o077), open('yay.txt') as f:
f.write('nyaroo~n')
```
`yay.txt` will be written with 600 file mode
"""
prev = os.umask(mask)
try:
yield
finally:
os.umask(prev)
secure_filestore = partial(umask, 0o077)
def is_secure_file(fn):
"""checks if a file can be accessed only by the owner"""
st = os.stat(fn)
return (st.st_mode & 0o777) == 0o600
class SecurityError(ValueError):
"""Error fired when a secure file is stored in an insecure manner"""
def assert_secure_file(file):
"""checks if a file is stored securely"""
if not is_secure_file(file):
msg = """
File {0} can be read by other users.
This is not secure. Please run 'chmod 600 "{0}"'"""
raise SecurityError(dedent(msg).replace('\n', ' ').format(file))
return True
def get_translation_for(package_name: str) -> gettext.NullTranslations:
"""find and return gettext translation for package"""
localedir = None
for localedir in pkg_resources.resource_filename(package_name, 'i18n'), None:
localefile = gettext.find(package_name, localedir) # type: ignore
if localefile:
break
else:
pass
return gettext.translation(package_name, localedir=localedir, fallback=True) # type: ignore
def get_translation_functions(package_name: str, names: Tuple[str, ...] = ('gettext',)):
"""finds and installs translation functions for package"""
translation = get_translation_for(package_name)
return [getattr(translation, x) for x in names]
|
ASMfreaK/habitipy | habitipy/util.py | assert_secure_file | python | def assert_secure_file(file):
if not is_secure_file(file):
msg = """
File {0} can be read by other users.
This is not secure. Please run 'chmod 600 "{0}"'"""
raise SecurityError(dedent(msg).replace('\n', ' ').format(file))
return True | checks if a file is stored securely | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/util.py#L144-L151 | [
"def is_secure_file(fn):\n \"\"\"checks if a file can be accessed only by the owner\"\"\"\n st = os.stat(fn)\n return (st.st_mode & 0o777) == 0o600\n"
] | """
habitipy - tools and library for Habitica restful API
utility functions
"""
# pylint: disable=invalid-name,bad-whitespace
import os
import gettext
from contextlib import contextmanager
from functools import partial
from textwrap import dedent
import re
from typing import Tuple
# import logging
import pkg_resources
from plumbum import colors
try:
from emoji import emojize
except ImportError:
emojize = None # type: ignore
def progressed_bar(count, total=100, status=None, suffix=None, bar_len=10):
"""render a progressed.io like progress bar"""
status = status or ''
suffix = suffix or '%'
assert isinstance(count, int)
count_normalized = count if count <= total else total
filled_len = int(round(bar_len * count_normalized / float(total)))
percents = 100.0 * count / float(total)
color = '#5cb85c'
if percents < 30.0:
color = '#d9534f'
if percents < 70.0:
color = '#f0ad4e'
text_color = colors.fg(color)
bar_color = text_color + colors.bg(color)
nc_color = colors.dark_gray
progressbar = (colors.bg('#428bca') | status) if status else ''
progressbar += (bar_color | ('█' * filled_len))
progressbar += (nc_color | ('█' * (bar_len - filled_len)))
progressbar += (text_color | (str(count) + suffix))
return progressbar
_progressed_regex_str = r"""
!
\[[^]]*\]
\(\s*http://progressed\.io/bar/(?P<progress>[0-9]{1,3})
(
\?((
(title=(?P<title>[^&) "]*))|
(scale=(?P<scale>[^&) "]*))|
(suffix=(?P<suffix>[^&) "]*))
)&*)*
){0,1}
\s*("[^"]*")*\)
"""
_progressed_regex = re.compile(_progressed_regex_str, re.VERBOSE)
def _progressed_match(m, bar_len=10):
progress = m['progress']
scale = m['scale']
progress = int(progress) if progress is not None else 0
scale = int(scale) if scale is not None else 100
return progressed_bar(
progress, total=scale,
status=m['title'], suffix=m['suffix'],
bar_len=bar_len)
def progressed(string):
"""
helper function to replace all links to progressed.io with progress bars
# Example
```python
from habitipy.util import progressed
text_from_habitica = 'Write thesis '
print(progressed(text_from_habitica))
```
```
Write thesis ██████████0%
```
"""
return _progressed_regex.sub(_progressed_match, string)
def prettify(string):
"""
replace markup emoji and progressbars with actual things
# Example
```python
from habitipy.util import prettify
print(prettify('Write thesis :book: '))
```
```
Write thesis 📖 ██████████0%
```
"""
string = emojize(string, use_aliases=True) if emojize else string
string = progressed(string)
return string
@contextmanager
def umask(mask):
"""
temporarily change umask
# Arguments
mask : a umask (invese of chmod argument)
# Example
```python
with umask(0o077), open('yay.txt') as f:
f.write('nyaroo~n')
```
`yay.txt` will be written with 600 file mode
"""
prev = os.umask(mask)
try:
yield
finally:
os.umask(prev)
secure_filestore = partial(umask, 0o077)
def is_secure_file(fn):
"""checks if a file can be accessed only by the owner"""
st = os.stat(fn)
return (st.st_mode & 0o777) == 0o600
class SecurityError(ValueError):
"""Error fired when a secure file is stored in an insecure manner"""
def get_translation_for(package_name: str) -> gettext.NullTranslations:
"""find and return gettext translation for package"""
localedir = None
for localedir in pkg_resources.resource_filename(package_name, 'i18n'), None:
localefile = gettext.find(package_name, localedir) # type: ignore
if localefile:
break
else:
pass
return gettext.translation(package_name, localedir=localedir, fallback=True) # type: ignore
def get_translation_functions(package_name: str, names: Tuple[str, ...] = ('gettext',)):
"""finds and installs translation functions for package"""
translation = get_translation_for(package_name)
return [getattr(translation, x) for x in names]
|
ASMfreaK/habitipy | habitipy/util.py | get_translation_for | python | def get_translation_for(package_name: str) -> gettext.NullTranslations:
localedir = None
for localedir in pkg_resources.resource_filename(package_name, 'i18n'), None:
localefile = gettext.find(package_name, localedir) # type: ignore
if localefile:
break
else:
pass
return gettext.translation(package_name, localedir=localedir, fallback=True) | find and return gettext translation for package | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/util.py#L154-L163 | null | """
habitipy - tools and library for Habitica restful API
utility functions
"""
# pylint: disable=invalid-name,bad-whitespace
import os
import gettext
from contextlib import contextmanager
from functools import partial
from textwrap import dedent
import re
from typing import Tuple
# import logging
import pkg_resources
from plumbum import colors
try:
from emoji import emojize
except ImportError:
emojize = None # type: ignore
def progressed_bar(count, total=100, status=None, suffix=None, bar_len=10):
"""render a progressed.io like progress bar"""
status = status or ''
suffix = suffix or '%'
assert isinstance(count, int)
count_normalized = count if count <= total else total
filled_len = int(round(bar_len * count_normalized / float(total)))
percents = 100.0 * count / float(total)
color = '#5cb85c'
if percents < 30.0:
color = '#d9534f'
if percents < 70.0:
color = '#f0ad4e'
text_color = colors.fg(color)
bar_color = text_color + colors.bg(color)
nc_color = colors.dark_gray
progressbar = (colors.bg('#428bca') | status) if status else ''
progressbar += (bar_color | ('█' * filled_len))
progressbar += (nc_color | ('█' * (bar_len - filled_len)))
progressbar += (text_color | (str(count) + suffix))
return progressbar
_progressed_regex_str = r"""
!
\[[^]]*\]
\(\s*http://progressed\.io/bar/(?P<progress>[0-9]{1,3})
(
\?((
(title=(?P<title>[^&) "]*))|
(scale=(?P<scale>[^&) "]*))|
(suffix=(?P<suffix>[^&) "]*))
)&*)*
){0,1}
\s*("[^"]*")*\)
"""
_progressed_regex = re.compile(_progressed_regex_str, re.VERBOSE)
def _progressed_match(m, bar_len=10):
progress = m['progress']
scale = m['scale']
progress = int(progress) if progress is not None else 0
scale = int(scale) if scale is not None else 100
return progressed_bar(
progress, total=scale,
status=m['title'], suffix=m['suffix'],
bar_len=bar_len)
def progressed(string):
"""
helper function to replace all links to progressed.io with progress bars
# Example
```python
from habitipy.util import progressed
text_from_habitica = 'Write thesis '
print(progressed(text_from_habitica))
```
```
Write thesis ██████████0%
```
"""
return _progressed_regex.sub(_progressed_match, string)
def prettify(string):
"""
replace markup emoji and progressbars with actual things
# Example
```python
from habitipy.util import prettify
print(prettify('Write thesis :book: '))
```
```
Write thesis 📖 ██████████0%
```
"""
string = emojize(string, use_aliases=True) if emojize else string
string = progressed(string)
return string
@contextmanager
def umask(mask):
"""
temporarily change umask
# Arguments
mask : a umask (invese of chmod argument)
# Example
```python
with umask(0o077), open('yay.txt') as f:
f.write('nyaroo~n')
```
`yay.txt` will be written with 600 file mode
"""
prev = os.umask(mask)
try:
yield
finally:
os.umask(prev)
secure_filestore = partial(umask, 0o077)
def is_secure_file(fn):
"""checks if a file can be accessed only by the owner"""
st = os.stat(fn)
return (st.st_mode & 0o777) == 0o600
class SecurityError(ValueError):
"""Error fired when a secure file is stored in an insecure manner"""
def assert_secure_file(file):
"""checks if a file is stored securely"""
if not is_secure_file(file):
msg = """
File {0} can be read by other users.
This is not secure. Please run 'chmod 600 "{0}"'"""
raise SecurityError(dedent(msg).replace('\n', ' ').format(file))
return True
# type: ignore
def get_translation_functions(package_name: str, names: Tuple[str, ...] = ('gettext',)):
    """Fetch the translation catalog for *package_name* and return the
    requested translation methods (by default just ``gettext``)."""
    catalog = get_translation_for(package_name)
    return [getattr(catalog, method_name) for method_name in names]
|
ASMfreaK/habitipy | habitipy/util.py | get_translation_functions | python | def get_translation_functions(package_name: str, names: Tuple[str, ...] = ('gettext',)):
translation = get_translation_for(package_name)
return [getattr(translation, x) for x in names] | finds and installs translation functions for package | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/util.py#L166-L169 | [
"def get_translation_for(package_name: str) -> gettext.NullTranslations:\n \"\"\"find and return gettext translation for package\"\"\"\n localedir = None\n for localedir in pkg_resources.resource_filename(package_name, 'i18n'), None:\n localefile = gettext.find(package_name, localedir) # type: igno... | """
habitipy - tools and library for Habitica restful API
utility functions
"""
# pylint: disable=invalid-name,bad-whitespace
import os
import gettext
from contextlib import contextmanager
from functools import partial
from textwrap import dedent
import re
from typing import Tuple
# import logging
import pkg_resources
from plumbum import colors
try:
from emoji import emojize
except ImportError:
emojize = None # type: ignore
def progressed_bar(count, total=100, status=None, suffix=None, bar_len=10):
"""render a progressed.io like progress bar"""
status = status or ''
suffix = suffix or '%'
assert isinstance(count, int)
count_normalized = count if count <= total else total
filled_len = int(round(bar_len * count_normalized / float(total)))
percents = 100.0 * count / float(total)
color = '#5cb85c'
if percents < 30.0:
color = '#d9534f'
if percents < 70.0:
color = '#f0ad4e'
text_color = colors.fg(color)
bar_color = text_color + colors.bg(color)
nc_color = colors.dark_gray
progressbar = (colors.bg('#428bca') | status) if status else ''
progressbar += (bar_color | ('█' * filled_len))
progressbar += (nc_color | ('█' * (bar_len - filled_len)))
progressbar += (text_color | (str(count) + suffix))
return progressbar
_progressed_regex_str = r"""
!
\[[^]]*\]
\(\s*http://progressed\.io/bar/(?P<progress>[0-9]{1,3})
(
\?((
(title=(?P<title>[^&) "]*))|
(scale=(?P<scale>[^&) "]*))|
(suffix=(?P<suffix>[^&) "]*))
)&*)*
){0,1}
\s*("[^"]*")*\)
"""
_progressed_regex = re.compile(_progressed_regex_str, re.VERBOSE)
def _progressed_match(m, bar_len=10):
progress = m['progress']
scale = m['scale']
progress = int(progress) if progress is not None else 0
scale = int(scale) if scale is not None else 100
return progressed_bar(
progress, total=scale,
status=m['title'], suffix=m['suffix'],
bar_len=bar_len)
def progressed(string):
"""
helper function to replace all links to progressed.io with progress bars
# Example
```python
from habitipy.util import progressed
text_from_habitica = 'Write thesis '
print(progressed(text_from_habitica))
```
```
Write thesis ██████████0%
```
"""
return _progressed_regex.sub(_progressed_match, string)
def prettify(string):
"""
replace markup emoji and progressbars with actual things
# Example
```python
from habitipy.util import prettify
print(prettify('Write thesis :book: '))
```
```
Write thesis 📖 ██████████0%
```
"""
string = emojize(string, use_aliases=True) if emojize else string
string = progressed(string)
return string
@contextmanager
def umask(mask):
"""
temporarily change umask
# Arguments
mask : a umask (invese of chmod argument)
# Example
```python
with umask(0o077), open('yay.txt') as f:
f.write('nyaroo~n')
```
`yay.txt` will be written with 600 file mode
"""
prev = os.umask(mask)
try:
yield
finally:
os.umask(prev)
secure_filestore = partial(umask, 0o077)
def is_secure_file(fn):
"""checks if a file can be accessed only by the owner"""
st = os.stat(fn)
return (st.st_mode & 0o777) == 0o600
class SecurityError(ValueError):
"""Error fired when a secure file is stored in an insecure manner"""
def assert_secure_file(file):
"""checks if a file is stored securely"""
if not is_secure_file(file):
msg = """
File {0} can be read by other users.
This is not secure. Please run 'chmod 600 "{0}"'"""
raise SecurityError(dedent(msg).replace('\n', ' ').format(file))
return True
def get_translation_for(package_name: str) -> gettext.NullTranslations:
    """Find and return the gettext translation catalog for *package_name*.

    Looks for a compiled catalog first in the package-bundled ``i18n``
    directory, then in the system default locale directory (``localedir``
    of ``None``). Falls back to a ``NullTranslations`` (identity) catalog
    when no compiled catalog is found anywhere.
    """
    localedir = None
    for localedir in (pkg_resources.resource_filename(package_name, 'i18n'), None):
        # stop at the first location that actually contains a catalog;
        # if none does, the loop leaves localedir == None (system default)
        if gettext.find(package_name, localedir):
            break
    return gettext.translation(package_name, localedir=localedir, fallback=True)
|
ASMfreaK/habitipy | habitipy/api.py | escape_keywords | python | def escape_keywords(arr):
for i in arr:
i = i if i not in kwlist else i + '_'
i = i if '-' not in i else i.replace('-', '_')
yield i | append _ to all python keywords | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L91-L96 | null | """
habitipy - tools and library for Habitica restful API
RESTful api abstraction module using requests
"""
# pylint: disable=invalid-name,too-few-public-methods,too-many-locals, bad-continuation
# pylint: disable=bad-whitespace
import json
import re
import uuid
from keyword import kwlist
import warnings
import textwrap
from collections import defaultdict
from typing import Dict, Union, List, Tuple, Iterator, Any, Optional
import pkg_resources
import requests
from plumbum import local
from .util import get_translation_functions
API_URI_BASE = '/api/v3'
API_CONTENT_TYPE = 'application/json'
APIDOC_LOCAL_FILE = '~/.config/habitipy/apidoc.txt'
_, ngettext = get_translation_functions('habitipy', names=('gettext', 'ngettext'))
class ParamAlreadyExist(ValueError):
"""Custom error type"""
class WrongReturnCode(ValueError):
"""Custom error type"""
class WrongData(ValueError):
"""Custom error type"""
class WrongPath(ValueError):
"""Custom error type"""
class ApiNode:
    """Represents a middle point in API

    A trie-like node over URI segments: `paths` maps fixed segments to
    child nodes/endpoints, while `param`/`param_name` hold the single
    child reachable through a `:parameter` segment (e.g. `:taskId`).
    """
    def __init__(self, param_name=None, param=None, paths=None):
        # child reached via a ':name' (parameterized) URI segment, if any
        self.param = param
        # the ':name' string itself, e.g. ':taskId'
        self.param_name = param_name
        # fixed URI segment -> child node or endpoint
        self.paths = paths or {}  # type: Dict[str, Union[ApiNode,ApiEndpoint]]
    def into(self, val: str) -> Union['ApiNode', 'ApiEndpoint']:
        """Get another leaf node with name `val` if possible"""
        if val in self.paths:
            return self.paths[val]
        # no fixed segment matched: fall back to the parameterized child,
        # which accepts any concrete value (e.g. an actual task id)
        if self.param:
            return self.param
        raise IndexError(_("Value {} is missing from api").format(val))  # NOQA: Q000
    def can_into(self, val: str) -> bool:
        """Determine if there is a leaf node with name `val`"""
        return val in self.paths or (self.param and self.param_name == val)
    def place(self, part: str, val: Union['ApiNode', 'ApiEndpoint']):
        """place a leaf node"""
        if part.startswith(':'):
            # NOTE(review): `self.param != part` compares a node object with a
            # string, so it is True whenever a param child already exists —
            # possibly meant `self.param_name != part`; confirm intent.
            if self.param and self.param != part:
                err = """Cannot place param '{}' as '{self.param_name}' exist on node already!"""
                raise ParamAlreadyExist(err.format(part, self=self))
            self.param = val
            self.param_name = part
            return val
        self.paths[part] = val
        return val
    def keys(self) -> Iterator[str]:
        """return all possible paths one can take from this ApiNode"""
        if self.param:
            yield self.param_name
        yield from self.paths.keys()
    def __repr__(self) -> str:
        text = '<ApiNode {self.param_name}: {self.param} paths: {self.paths}>'
        return text.format(self=self)
    def is_param(self, val):
        """checks if val is this node's param"""
        return val == self.param_name
class Habitipy:
"""
Represents Habitica API
# Arguments
conf : Configuration dictionary for API. Should contain `url`, `login` and `password` fields
apis (None, List[ApiEndpoint], ApiNode): Field, representing API endpoints.
current : current position in the API
from_github : whether it is needed to download apiDoc from habitica's github
branch : branch to use to download apiDoc from habitica's github
strict : show warnings on inconsistent apiDocs
# Example
```python
from habitipy import Habitipy
conf = {
'url': 'https://habitica.com',
'login': 'your-login-uuid-to-replace',
'password': 'your-password-uuid-here'
api = Habitipy(conf)
print(api.user.get())
```
Interactive help:
```python
In [1]: from habitipy import Habitipy, load_conf,DEFAULT_CONF
In [2]: api = Habitipy(load_conf(DEFAULT_CONF))
In [3]: api.<tab>
api.approvals api.debug api.models api.tags
api.challenges api.group api.notifications api.tasks
api.content api.groups api.reorder-tags api.user
api.coupons api.hall api.shops
api.cron api.members api.status
In [84]: api.user.get?
Signature: api.user.get(**kwargs)
Type: Habitipy
String form: <habitipy.api.Habitipy object at 0x7fa6fd7966d8>
File: ~/projects/python/habitica/habitipy/api.py
Docstring:
{get} /api/v3/user Get the authenticated user's profile
responce params:
"data" of type "object"
```
From other Python consoles you can just run:
```python
>>> dir(api)
['__call__', '__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__',
'__format__', '__ge__', '__getattr__', '__getattribute__', '__getitem__', '__gt__',
'__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__',
'__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__',
'__subclasshook__', '__weakref__', '_apis', '_conf', '_current', '_is_request',
'_make_apis_dict', '_make_headers', '_node',
'approvals', 'challenges', 'content', 'coupons', 'cron', 'debug', 'group', 'groups', 'hall',
'members', 'models', 'notifications', 'reorder-tags',
'shops', 'status', 'tags', 'tasks', 'user']
>>> print(api.user.get.__doc__)
{get} /api/v3/user Get the authenticated user's profile
responce params:
"data" of type "object"
```
"""
def __init__(self, conf: Dict[str, str], *,
apis=None, current: Optional[List[str]] = None,
from_github=False, branch=None,
strict=False) -> None:
self._conf = conf
self._strict = strict
if isinstance(apis, (type(None), list)):
if not apis:
fn = local.path(APIDOC_LOCAL_FILE)
if not fn.exists():
fn = pkg_resources.resource_filename('habitipy', 'apidoc.txt')
fn = branch if from_github else fn
with warnings.catch_warnings():
warnings.simplefilter('error' if strict else 'ignore')
apis = parse_apidoc(fn, from_github)
with warnings.catch_warnings():
warnings.simplefilter('error' if strict else 'ignore')
apis = self._make_apis_dict(apis)
if isinstance(apis, ApiNode):
self._apis = apis
else:
raise TypeError('Possible apis {} have wrong type({})'.format(apis, type(apis)))
current = current or ['api', 'v3']
if not isinstance(current, list):
raise TypeError('Wrong current api position {}'.format(current))
_node = self._apis # type: Union[ApiNode, ApiEndpoint]
for part in current:
if isinstance(_node, ApiNode):
_node = _node.into(part)
else:
raise WrongPath("""Can't enter into {} with part {}""".format(_node, part))
self._node = _node
self._current = current
if isinstance(self._node, ApiEndpoint):
self.__doc__ = self._node.render_docstring()
@staticmethod
def _make_apis_dict(apis) -> ApiNode:
node = ApiNode()
for api in apis:
cur_node = node
prev_part = ''
for part in api.parted_uri:
if cur_node.can_into(part):
_node = cur_node.into(part)
else:
try:
_node = cur_node.place(part, ApiNode())
except ParamAlreadyExist:
warnings.warn('Ignoring conflicting param. Don\'t use {}'.format(api.uri))
_node = cur_node.param
cur_node = _node # type: ignore
prev_part += '/' + part
cur_node.place(api.method, api)
return node
def _make_headers(self):
headers = {
'x-api-user': self._conf['login'],
'x-api-key': self._conf['password']
} if self._conf else {}
headers.update({'content-type': API_CONTENT_TYPE})
return headers
def __dir__(self):
return super().__dir__() + list(escape_keywords(self._node.keys()))
def __getattr__(self, val: str) -> Union[Any, 'Habitipy']:
if val in dir(super()):
# pylint: disable=no-member
return super().__getattr__(val) # type:ignore
val = val if not val.endswith('_') else val.rstrip('_')
val = val if '_' not in val else val.replace('_', '-')
return self.__class__(self._conf, apis=self._apis, current=self._current + [val])
def __getitem__(self, val: Union[str, List[str], Tuple[str, ...]]) -> 'Habitipy':
if isinstance(val, str):
return self.__class__(self._conf, apis=self._apis, current=self._current + [val])
if isinstance(val, (list, tuple)):
current = self._current + list(val)
return self.__class__(self._conf, apis=self._apis, current=current)
raise IndexError('{} not found in this API!'.format(val))
def _prepare_request(self, backend=requests, **kwargs):
uri = '/'.join([self._conf['url']] + self._current[:-1])
if not isinstance(self._node, ApiEndpoint):
raise ValueError('{} is not an endpoint!'.format(uri))
method = self._node.method
headers = self._make_headers()
query = {}
if 'query' in self._node.params:
for name, param in self._node.params['query'].items():
if name in kwargs:
query[name] = kwargs.pop(name)
elif not param.is_optional:
raise TypeError('Mandatory param {} is missing'.format(name))
request = getattr(backend, method)
request_args = (uri,)
request_kwargs = dict(headers=headers, params=query)
if method in ['put', 'post', 'delete']:
request_kwargs['data'] = json.dumps(kwargs)
return request, request_args, request_kwargs
def _request(self, request, request_args, request_kwargs):
res = request(*request_args, **request_kwargs)
if res.status_code != self._node.retcode:
res.raise_for_status()
msg = _("""
Got return code {res.status_code}, but {node.retcode} was
expected for {node.uri}. It may be a typo in Habitica apiDoc.
Please file an issue to https://github.com/HabitRPG/habitica/issues""")
msg = textwrap.dedent(msg)
msg = msg.replace('\n', ' ').format(res=res, node=self._node)
if self._strict:
raise WrongReturnCode(msg)
else:
warnings.warn(msg)
return res.json()['data']
def __call__(self, **kwargs) -> Union[Dict, List]:
return self._request(*self._prepare_request(**kwargs))
def download_api(branch=None) -> str:
    """download API documentation from _branch_ of Habitica\'s repo on Github"""
    repo_api = 'https://api.github.com/repos/HabitRPG/habitica'
    if not branch:
        # no branch given: default to the latest tagged release
        branch = requests.get(repo_api + '/releases/latest').json()['tag_name']
    fetch = local['curl']['-sL', repo_api + '/tarball/{}'.format(branch)]
    extract = local['tar'][
        'axzf', '-', '--wildcards', '*/website/server/controllers/api-v3/*', '--to-stdout']
    keep_apidoc_lines = local['grep']['@api']
    # strip leading comment decoration from the apiDoc lines
    strip_decoration = local['sed']['-e', 's/^[ */]*//g', '-e', 's/ / /g', '-']
    pipeline = fetch | extract | keep_apidoc_lines | strip_decoration
    return pipeline()
def save_apidoc(text: str) -> None:
    """Write *text* to the local apidoc cache file, creating its directory."""
    cache_file = local.path(APIDOC_LOCAL_FILE)
    cache_dir = cache_file.dirname
    if not cache_dir.exists():
        cache_dir.mkdir()
    with open(cache_file, 'w') as out:
        out.write(text)
def parse_apidoc(
        file_or_branch,
        from_github=False,
        save_github_version=True
) -> List['ApiEndpoint']:
    """read file and parse apiDoc lines

    Arguments:
    file_or_branch -- path of a local apidoc file, or (when `from_github`
                      is true) the Habitica git branch to download from
    from_github -- download the apiDoc text instead of reading a local file
    save_github_version -- cache a downloaded apiDoc to the local file
    """
    apis = []  # type: List[ApiEndpoint]
    # shared tail of the @apiParam/@apiSuccess line grammar:
    # optional (group), optional {type}, field name, free-text description
    regex = r'(?P<group>\([^)]*\)){0,1} *(?P<type_>{[^}]*}){0,1} *'
    regex += r'(?P<field>[^ ]*) *(?P<description>.*)$'
    param_regex = re.compile(r'^@apiParam {1,}' + regex)
    success_regex = re.compile(r'^@apiSuccess {1,}' + regex)
    if from_github:
        text = download_api(file_or_branch)
        if save_github_version:
            save_apidoc(text)
    else:
        with open(file_or_branch) as f:
            text = f.read()
    for line in text.split('\n'):
        line = line.replace('\n', '')  # NOTE(review): no-op after split('\n')
        if line.startswith('@api '):
            # a new endpoint starts: default the previous endpoint's return
            # code to 200 if no @apiSuccess group set it explicitly
            if apis:
                if not apis[-1].retcode:
                    apis[-1].retcode = 200
            split_line = line.split(' ')
            assert len(split_line) >= 3
            method = split_line[1]
            uri = split_line[2]
            # the HTTP method comes wrapped in braces, e.g. '{get}'
            assert method[0] == '{'
            assert method[-1] == '}'
            method = method[1:-1]
            if not uri.startswith(API_URI_BASE):
                warnings.warn(_("Wrong api url: {}").format(uri))  # noqa: Q000
            title = ' '.join(split_line[3:])
            apis.append(ApiEndpoint(method, uri, title))
        elif line.startswith('@apiParam '):
            res = next(param_regex.finditer(line)).groupdict()
            apis[-1].add_param(**res)
        elif line.startswith('@apiSuccess '):
            res = next(success_regex.finditer(line)).groupdict()
            apis[-1].add_success(**res)
    # default the final endpoint's return code as well
    if apis:
        if not apis[-1].retcode:
            apis[-1].retcode = 200
    return apis
class ApiEndpoint:
    """
    Represents a single api endpoint.

    Built from one '@api {method} /uri title' line plus its subsequent
    @apiParam/@apiSuccess lines as parsed by `parse_apidoc`.
    """
    def __init__(self, method, uri, title=''):
        # lowercase HTTP verb, e.g. 'get'
        self.method = method
        self.uri = uri
        # URI split into segments, leading '/' dropped
        self.parted_uri = uri[1:].split('/')
        self.title = title
        # param group name -> {field name -> Param}
        self.params = defaultdict(dict)
        # expected HTTP status; filled from @apiSuccess groups (or 200)
        self.retcode = None
    def add_param(self, group=None, type_='', field='', description=''):
        """parse and append a param"""
        group = group or '(Parameter)'
        # strip surrounding parentheses: '(Query)' -> 'query'
        group = group.lower()[1:-1]
        p = Param(type_, field, description)
        self.params[group][p.field] = p
    def add_success(self, group=None, type_='', field='', description=''):
        """parse and append a success data param"""
        group = group or '(200)'
        # success group carries the status code: '(200)' -> 200
        group = int(group.lower()[1:-1])
        self.retcode = self.retcode or group
        if group != self.retcode:
            raise ValueError('Two or more retcodes!')
        type_ = type_ or '{String}'
        p = Param(type_, field, description)
        # 'responce' (sic) is the key other code looks up — do not rename
        self.params['responce'][p.field] = p
    def __repr__(self):
        return '<@api {{{self.method}}} {self.uri} {self.title}>'.format(self=self)
    def render_docstring(self):
        """make a nice docstring for ipython"""
        res = '{{{self.method}}} {self.uri} {self.title}\n'.format(self=self)
        if self.params:
            for group, params in self.params.items():
                res += '\n' + group + ' params:\n'
                for param in params.values():
                    res += param.render_docstring()
        return res
# TODO: fix type checking
# Maps apiDoc type names to predicates for (future) param validation.
_valid_types = {
    'string': lambda x: isinstance(x, str),
    # 'sring' appears to mirror a typo occurring in the apiDoc sources
    'sring': lambda x: isinstance(x, str),
    # NOTE(review): rejects ints — should this be isinstance(x, (int, float))?
    'number': lambda x: isinstance(x, float),
    'uuid': lambda u: isinstance(u, str) and u.replace('-', '') == uuid.UUID(u).hex
}
class Param:
    """represents param of request or responce

    Parses one apiDoc field spec such as '[data.text=default]' with an
    optional '{Type=val1,val2}' type annotation into structured fields:
    is_optional, field, default, path, type, possible_values, description.
    """
    def __init__(self, type_, field, description):
        # '[field]' marks an optional parameter
        self.is_optional = field[0] == '[' and field[-1] == ']'
        self.field = field[1:-1] if self.is_optional else field
        # 'field=default' carries a default value
        if '=' in self.field:
            self.field, self.default = self.field.split('=')
        else:
            self.default = ''
        # dotted names like 'data.text' become path=['data'], field='text'
        self.field = self.field.split('.')
        if len(self.field) > 1:
            self.path, self.field = self.field[:-1], self.field[-1]
        else:
            self.field = self.field[0]
            self.path = []
        if type_:
            # strip the surrounding braces: '{String}' -> 'String'
            self.type = type_[1:-1] if len(type_) > 2 else type_
            # '{Type=va1,"va2"}' enumerates the allowed values
            if '=' in self.type:
                self.type, self.possible_values = self.type.split('=')
                self.possible_values = list(map(
                    lambda s: s if s[0] != '"' else s[1:-1],
                    self.possible_values.split(',')))
            else:
                self.possible_values = []
            self.type = self.type.lower()
        else:
            self.type = None
            self.possible_values = []
        self.description = description
    def validate(self, obj):
        """check if obj has this api param

        NOTE(review): the lookups run (raising KeyError on a missing
        field), but the method then always raises NotImplementedError.
        """
        if self.path:
            for i in self.path:
                obj = obj[i]
        obj = obj[self.field]
        raise NotImplementedError('Validation is not implemented yet')
    def render_docstring(self):
        """make a nice docstring for ipython"""
        default = (' = ' + str(self.default)) if self.default else ''
        opt = 'optional' if self.is_optional else ''
        can_be = ' '.join(self.possible_values) if self.possible_values else ''
        can_be = 'one of [{}]'.format(can_be) if can_be else ''
        type_ = 'of type "' + str(self.type) + '"'
        res = ' '.join([opt, '"' + self.field + '"', default, type_, can_be, '\n'])
        # NOTE(review): looks like a collapsed "'  ' -> ' '" replace — confirm
        return res.replace(' ', ' ').lstrip()
|
ASMfreaK/habitipy | habitipy/api.py | download_api | python | def download_api(branch=None) -> str:
habitica_github_api = 'https://api.github.com/repos/HabitRPG/habitica'
if not branch:
branch = requests.get(habitica_github_api + '/releases/latest').json()['tag_name']
curl = local['curl']['-sL', habitica_github_api + '/tarball/{}'.format(branch)]
tar = local['tar'][
'axzf', '-', '--wildcards', '*/website/server/controllers/api-v3/*', '--to-stdout']
grep = local['grep']['@api']
sed = local['sed']['-e', 's/^[ */]*//g', '-e', 's/ / /g', '-']
return (curl | tar | grep | sed)() | download API documentation from _branch_ of Habitica\'s repo on Github | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L287-L297 | null | """
habitipy - tools and library for Habitica restful API
RESTful api abstraction module using requests
"""
# pylint: disable=invalid-name,too-few-public-methods,too-many-locals, bad-continuation
# pylint: disable=bad-whitespace
import json
import re
import uuid
from keyword import kwlist
import warnings
import textwrap
from collections import defaultdict
from typing import Dict, Union, List, Tuple, Iterator, Any, Optional
import pkg_resources
import requests
from plumbum import local
from .util import get_translation_functions
API_URI_BASE = '/api/v3'
API_CONTENT_TYPE = 'application/json'
APIDOC_LOCAL_FILE = '~/.config/habitipy/apidoc.txt'
_, ngettext = get_translation_functions('habitipy', names=('gettext', 'ngettext'))
class ParamAlreadyExist(ValueError):
"""Custom error type"""
class WrongReturnCode(ValueError):
"""Custom error type"""
class WrongData(ValueError):
"""Custom error type"""
class WrongPath(ValueError):
"""Custom error type"""
class ApiNode:
"""Represents a middle point in API"""
def __init__(self, param_name=None, param=None, paths=None):
self.param = param
self.param_name = param_name
self.paths = paths or {} # type: Dict[str, Union[ApiNode,ApiEndpoint]]
def into(self, val: str) -> Union['ApiNode', 'ApiEndpoint']:
"""Get another leaf node with name `val` if possible"""
if val in self.paths:
return self.paths[val]
if self.param:
return self.param
raise IndexError(_("Value {} is missing from api").format(val)) # NOQA: Q000
def can_into(self, val: str) -> bool:
"""Determine if there is a leaf node with name `val`"""
return val in self.paths or (self.param and self.param_name == val)
def place(self, part: str, val: Union['ApiNode', 'ApiEndpoint']):
"""place a leaf node"""
if part.startswith(':'):
if self.param and self.param != part:
err = """Cannot place param '{}' as '{self.param_name}' exist on node already!"""
raise ParamAlreadyExist(err.format(part, self=self))
self.param = val
self.param_name = part
return val
self.paths[part] = val
return val
def keys(self) -> Iterator[str]:
"""return all possible paths one can take from this ApiNode"""
if self.param:
yield self.param_name
yield from self.paths.keys()
def __repr__(self) -> str:
text = '<ApiNode {self.param_name}: {self.param} paths: {self.paths}>'
return text.format(self=self)
def is_param(self, val):
"""checks if val is this node's param"""
return val == self.param_name
def escape_keywords(arr):
"""append _ to all python keywords"""
for i in arr:
i = i if i not in kwlist else i + '_'
i = i if '-' not in i else i.replace('-', '_')
yield i
class Habitipy:
"""
Represents Habitica API
# Arguments
conf : Configuration dictionary for API. Should contain `url`, `login` and `password` fields
apis (None, List[ApiEndpoint], ApiNode): Field, representing API endpoints.
current : current position in the API
from_github : whether it is needed to download apiDoc from habitica's github
branch : branch to use to download apiDoc from habitica's github
strict : show warnings on inconsistent apiDocs
# Example
```python
from habitipy import Habitipy
conf = {
'url': 'https://habitica.com',
'login': 'your-login-uuid-to-replace',
'password': 'your-password-uuid-here'
api = Habitipy(conf)
print(api.user.get())
```
Interactive help:
```python
In [1]: from habitipy import Habitipy, load_conf,DEFAULT_CONF
In [2]: api = Habitipy(load_conf(DEFAULT_CONF))
In [3]: api.<tab>
api.approvals api.debug api.models api.tags
api.challenges api.group api.notifications api.tasks
api.content api.groups api.reorder-tags api.user
api.coupons api.hall api.shops
api.cron api.members api.status
In [84]: api.user.get?
Signature: api.user.get(**kwargs)
Type: Habitipy
String form: <habitipy.api.Habitipy object at 0x7fa6fd7966d8>
File: ~/projects/python/habitica/habitipy/api.py
Docstring:
{get} /api/v3/user Get the authenticated user's profile
responce params:
"data" of type "object"
```
From other Python consoles you can just run:
```python
>>> dir(api)
['__call__', '__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__',
'__format__', '__ge__', '__getattr__', '__getattribute__', '__getitem__', '__gt__',
'__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__',
'__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__',
'__subclasshook__', '__weakref__', '_apis', '_conf', '_current', '_is_request',
'_make_apis_dict', '_make_headers', '_node',
'approvals', 'challenges', 'content', 'coupons', 'cron', 'debug', 'group', 'groups', 'hall',
'members', 'models', 'notifications', 'reorder-tags',
'shops', 'status', 'tags', 'tasks', 'user']
>>> print(api.user.get.__doc__)
{get} /api/v3/user Get the authenticated user's profile
responce params:
"data" of type "object"
```
"""
def __init__(self, conf: Dict[str, str], *,
apis=None, current: Optional[List[str]] = None,
from_github=False, branch=None,
strict=False) -> None:
self._conf = conf
self._strict = strict
if isinstance(apis, (type(None), list)):
if not apis:
fn = local.path(APIDOC_LOCAL_FILE)
if not fn.exists():
fn = pkg_resources.resource_filename('habitipy', 'apidoc.txt')
fn = branch if from_github else fn
with warnings.catch_warnings():
warnings.simplefilter('error' if strict else 'ignore')
apis = parse_apidoc(fn, from_github)
with warnings.catch_warnings():
warnings.simplefilter('error' if strict else 'ignore')
apis = self._make_apis_dict(apis)
if isinstance(apis, ApiNode):
self._apis = apis
else:
raise TypeError('Possible apis {} have wrong type({})'.format(apis, type(apis)))
current = current or ['api', 'v3']
if not isinstance(current, list):
raise TypeError('Wrong current api position {}'.format(current))
_node = self._apis # type: Union[ApiNode, ApiEndpoint]
for part in current:
if isinstance(_node, ApiNode):
_node = _node.into(part)
else:
raise WrongPath("""Can't enter into {} with part {}""".format(_node, part))
self._node = _node
self._current = current
if isinstance(self._node, ApiEndpoint):
self.__doc__ = self._node.render_docstring()
@staticmethod
def _make_apis_dict(apis) -> ApiNode:
node = ApiNode()
for api in apis:
cur_node = node
prev_part = ''
for part in api.parted_uri:
if cur_node.can_into(part):
_node = cur_node.into(part)
else:
try:
_node = cur_node.place(part, ApiNode())
except ParamAlreadyExist:
warnings.warn('Ignoring conflicting param. Don\'t use {}'.format(api.uri))
_node = cur_node.param
cur_node = _node # type: ignore
prev_part += '/' + part
cur_node.place(api.method, api)
return node
def _make_headers(self):
headers = {
'x-api-user': self._conf['login'],
'x-api-key': self._conf['password']
} if self._conf else {}
headers.update({'content-type': API_CONTENT_TYPE})
return headers
def __dir__(self):
return super().__dir__() + list(escape_keywords(self._node.keys()))
def __getattr__(self, val: str) -> Union[Any, 'Habitipy']:
if val in dir(super()):
# pylint: disable=no-member
return super().__getattr__(val) # type:ignore
val = val if not val.endswith('_') else val.rstrip('_')
val = val if '_' not in val else val.replace('_', '-')
return self.__class__(self._conf, apis=self._apis, current=self._current + [val])
def __getitem__(self, val: Union[str, List[str], Tuple[str, ...]]) -> 'Habitipy':
if isinstance(val, str):
return self.__class__(self._conf, apis=self._apis, current=self._current + [val])
if isinstance(val, (list, tuple)):
current = self._current + list(val)
return self.__class__(self._conf, apis=self._apis, current=current)
raise IndexError('{} not found in this API!'.format(val))
def _prepare_request(self, backend=requests, **kwargs):
uri = '/'.join([self._conf['url']] + self._current[:-1])
if not isinstance(self._node, ApiEndpoint):
raise ValueError('{} is not an endpoint!'.format(uri))
method = self._node.method
headers = self._make_headers()
query = {}
if 'query' in self._node.params:
for name, param in self._node.params['query'].items():
if name in kwargs:
query[name] = kwargs.pop(name)
elif not param.is_optional:
raise TypeError('Mandatory param {} is missing'.format(name))
request = getattr(backend, method)
request_args = (uri,)
request_kwargs = dict(headers=headers, params=query)
if method in ['put', 'post', 'delete']:
request_kwargs['data'] = json.dumps(kwargs)
return request, request_args, request_kwargs
def _request(self, request, request_args, request_kwargs):
res = request(*request_args, **request_kwargs)
if res.status_code != self._node.retcode:
res.raise_for_status()
msg = _("""
Got return code {res.status_code}, but {node.retcode} was
expected for {node.uri}. It may be a typo in Habitica apiDoc.
Please file an issue to https://github.com/HabitRPG/habitica/issues""")
msg = textwrap.dedent(msg)
msg = msg.replace('\n', ' ').format(res=res, node=self._node)
if self._strict:
raise WrongReturnCode(msg)
else:
warnings.warn(msg)
return res.json()['data']
def __call__(self, **kwargs) -> Union[Dict, List]:
return self._request(*self._prepare_request(**kwargs))
def save_apidoc(text: str) -> None:
"""save `text` to apidoc cache"""
apidoc_local = local.path(APIDOC_LOCAL_FILE)
if not apidoc_local.dirname.exists():
apidoc_local.dirname.mkdir()
with open(apidoc_local, 'w') as f:
f.write(text)
def parse_apidoc(
file_or_branch,
from_github=False,
save_github_version=True
) -> List['ApiEndpoint']:
"""read file and parse apiDoc lines"""
apis = [] # type: List[ApiEndpoint]
regex = r'(?P<group>\([^)]*\)){0,1} *(?P<type_>{[^}]*}){0,1} *'
regex += r'(?P<field>[^ ]*) *(?P<description>.*)$'
param_regex = re.compile(r'^@apiParam {1,}' + regex)
success_regex = re.compile(r'^@apiSuccess {1,}' + regex)
if from_github:
text = download_api(file_or_branch)
if save_github_version:
save_apidoc(text)
else:
with open(file_or_branch) as f:
text = f.read()
for line in text.split('\n'):
line = line.replace('\n', '')
if line.startswith('@api '):
if apis:
if not apis[-1].retcode:
apis[-1].retcode = 200
split_line = line.split(' ')
assert len(split_line) >= 3
method = split_line[1]
uri = split_line[2]
assert method[0] == '{'
assert method[-1] == '}'
method = method[1:-1]
if not uri.startswith(API_URI_BASE):
warnings.warn(_("Wrong api url: {}").format(uri)) # noqa: Q000
title = ' '.join(split_line[3:])
apis.append(ApiEndpoint(method, uri, title))
elif line.startswith('@apiParam '):
res = next(param_regex.finditer(line)).groupdict()
apis[-1].add_param(**res)
elif line.startswith('@apiSuccess '):
res = next(success_regex.finditer(line)).groupdict()
apis[-1].add_success(**res)
if apis:
if not apis[-1].retcode:
apis[-1].retcode = 200
return apis
class ApiEndpoint:
"""
Represents a single api endpoint.
"""
def __init__(self, method, uri, title=''):
self.method = method
self.uri = uri
self.parted_uri = uri[1:].split('/')
self.title = title
self.params = defaultdict(dict)
self.retcode = None
def add_param(self, group=None, type_='', field='', description=''):
"""parse and append a param"""
group = group or '(Parameter)'
group = group.lower()[1:-1]
p = Param(type_, field, description)
self.params[group][p.field] = p
def add_success(self, group=None, type_='', field='', description=''):
"""parse and append a success data param"""
group = group or '(200)'
group = int(group.lower()[1:-1])
self.retcode = self.retcode or group
if group != self.retcode:
raise ValueError('Two or more retcodes!')
type_ = type_ or '{String}'
p = Param(type_, field, description)
self.params['responce'][p.field] = p
def __repr__(self):
return '<@api {{{self.method}}} {self.uri} {self.title}>'.format(self=self)
def render_docstring(self):
"""make a nice docstring for ipython"""
res = '{{{self.method}}} {self.uri} {self.title}\n'.format(self=self)
if self.params:
for group, params in self.params.items():
res += '\n' + group + ' params:\n'
for param in params.values():
res += param.render_docstring()
return res
# TODO: fix type checking
_valid_types = {
'string': lambda x: isinstance(x, str),
'sring': lambda x: isinstance(x, str),
'number': lambda x: isinstance(x, float),
'uuid': lambda u: isinstance(u, str) and u.replace('-', '') == uuid.UUID(u).hex
}
class Param:
"""represents param of request or responce"""
def __init__(self, type_, field, description):
self.is_optional = field[0] == '[' and field[-1] == ']'
self.field = field[1:-1] if self.is_optional else field
if '=' in self.field:
self.field, self.default = self.field.split('=')
else:
self.default = ''
self.field = self.field.split('.')
if len(self.field) > 1:
self.path, self.field = self.field[:-1], self.field[-1]
else:
self.field = self.field[0]
self.path = []
if type_:
self.type = type_[1:-1] if len(type_) > 2 else type_
if '=' in self.type:
self.type, self.possible_values = self.type.split('=')
self.possible_values = list(map(
lambda s: s if s[0] != '"' else s[1:-1],
self.possible_values.split(',')))
else:
self.possible_values = []
self.type = self.type.lower()
else:
self.type = None
self.possible_values = []
self.description = description
def validate(self, obj):
"""check if obj has this api param"""
if self.path:
for i in self.path:
obj = obj[i]
obj = obj[self.field]
raise NotImplementedError('Validation is not implemented yet')
def render_docstring(self):
"""make a nice docstring for ipython"""
default = (' = ' + str(self.default)) if self.default else ''
opt = 'optional' if self.is_optional else ''
can_be = ' '.join(self.possible_values) if self.possible_values else ''
can_be = 'one of [{}]'.format(can_be) if can_be else ''
type_ = 'of type "' + str(self.type) + '"'
res = ' '.join([opt, '"' + self.field + '"', default, type_, can_be, '\n'])
return res.replace(' ', ' ').lstrip()
|
ASMfreaK/habitipy | habitipy/api.py | save_apidoc | python | def save_apidoc(text: str) -> None:
apidoc_local = local.path(APIDOC_LOCAL_FILE)
if not apidoc_local.dirname.exists():
apidoc_local.dirname.mkdir()
with open(apidoc_local, 'w') as f:
f.write(text) | save `text` to apidoc cache | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L300-L306 | null | """
habitipy - tools and library for Habitica restful API
RESTful api abstraction module using requests
"""
# pylint: disable=invalid-name,too-few-public-methods,too-many-locals, bad-continuation
# pylint: disable=bad-whitespace
import json
import re
import uuid
from keyword import kwlist
import warnings
import textwrap
from collections import defaultdict
from typing import Dict, Union, List, Tuple, Iterator, Any, Optional
import pkg_resources
import requests
from plumbum import local
from .util import get_translation_functions
API_URI_BASE = '/api/v3'
API_CONTENT_TYPE = 'application/json'
APIDOC_LOCAL_FILE = '~/.config/habitipy/apidoc.txt'
_, ngettext = get_translation_functions('habitipy', names=('gettext', 'ngettext'))
class ParamAlreadyExist(ValueError):
"""Custom error type"""
class WrongReturnCode(ValueError):
"""Custom error type"""
class WrongData(ValueError):
"""Custom error type"""
class WrongPath(ValueError):
"""Custom error type"""
class ApiNode:
"""Represents a middle point in API"""
def __init__(self, param_name=None, param=None, paths=None):
self.param = param
self.param_name = param_name
self.paths = paths or {} # type: Dict[str, Union[ApiNode,ApiEndpoint]]
def into(self, val: str) -> Union['ApiNode', 'ApiEndpoint']:
"""Get another leaf node with name `val` if possible"""
if val in self.paths:
return self.paths[val]
if self.param:
return self.param
raise IndexError(_("Value {} is missing from api").format(val)) # NOQA: Q000
def can_into(self, val: str) -> bool:
"""Determine if there is a leaf node with name `val`"""
return val in self.paths or (self.param and self.param_name == val)
def place(self, part: str, val: Union['ApiNode', 'ApiEndpoint']):
"""place a leaf node"""
if part.startswith(':'):
if self.param and self.param != part:
err = """Cannot place param '{}' as '{self.param_name}' exist on node already!"""
raise ParamAlreadyExist(err.format(part, self=self))
self.param = val
self.param_name = part
return val
self.paths[part] = val
return val
def keys(self) -> Iterator[str]:
"""return all possible paths one can take from this ApiNode"""
if self.param:
yield self.param_name
yield from self.paths.keys()
def __repr__(self) -> str:
text = '<ApiNode {self.param_name}: {self.param} paths: {self.paths}>'
return text.format(self=self)
def is_param(self, val):
"""checks if val is this node's param"""
return val == self.param_name
def escape_keywords(arr):
"""append _ to all python keywords"""
for i in arr:
i = i if i not in kwlist else i + '_'
i = i if '-' not in i else i.replace('-', '_')
yield i
class Habitipy:
"""
Represents Habitica API
# Arguments
conf : Configuration dictionary for API. Should contain `url`, `login` and `password` fields
apis (None, List[ApiEndpoint], ApiNode): Field, representing API endpoints.
current : current position in the API
from_github : whether it is needed to download apiDoc from habitica's github
branch : branch to use to download apiDoc from habitica's github
strict : show warnings on inconsistent apiDocs
# Example
```python
from habitipy import Habitipy
conf = {
'url': 'https://habitica.com',
'login': 'your-login-uuid-to-replace',
'password': 'your-password-uuid-here'
api = Habitipy(conf)
print(api.user.get())
```
Interactive help:
```python
In [1]: from habitipy import Habitipy, load_conf,DEFAULT_CONF
In [2]: api = Habitipy(load_conf(DEFAULT_CONF))
In [3]: api.<tab>
api.approvals api.debug api.models api.tags
api.challenges api.group api.notifications api.tasks
api.content api.groups api.reorder-tags api.user
api.coupons api.hall api.shops
api.cron api.members api.status
In [84]: api.user.get?
Signature: api.user.get(**kwargs)
Type: Habitipy
String form: <habitipy.api.Habitipy object at 0x7fa6fd7966d8>
File: ~/projects/python/habitica/habitipy/api.py
Docstring:
{get} /api/v3/user Get the authenticated user's profile
responce params:
"data" of type "object"
```
From other Python consoles you can just run:
```python
>>> dir(api)
['__call__', '__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__',
'__format__', '__ge__', '__getattr__', '__getattribute__', '__getitem__', '__gt__',
'__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__',
'__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__',
'__subclasshook__', '__weakref__', '_apis', '_conf', '_current', '_is_request',
'_make_apis_dict', '_make_headers', '_node',
'approvals', 'challenges', 'content', 'coupons', 'cron', 'debug', 'group', 'groups', 'hall',
'members', 'models', 'notifications', 'reorder-tags',
'shops', 'status', 'tags', 'tasks', 'user']
>>> print(api.user.get.__doc__)
{get} /api/v3/user Get the authenticated user's profile
responce params:
"data" of type "object"
```
"""
def __init__(self, conf: Dict[str, str], *,
apis=None, current: Optional[List[str]] = None,
from_github=False, branch=None,
strict=False) -> None:
self._conf = conf
self._strict = strict
if isinstance(apis, (type(None), list)):
if not apis:
fn = local.path(APIDOC_LOCAL_FILE)
if not fn.exists():
fn = pkg_resources.resource_filename('habitipy', 'apidoc.txt')
fn = branch if from_github else fn
with warnings.catch_warnings():
warnings.simplefilter('error' if strict else 'ignore')
apis = parse_apidoc(fn, from_github)
with warnings.catch_warnings():
warnings.simplefilter('error' if strict else 'ignore')
apis = self._make_apis_dict(apis)
if isinstance(apis, ApiNode):
self._apis = apis
else:
raise TypeError('Possible apis {} have wrong type({})'.format(apis, type(apis)))
current = current or ['api', 'v3']
if not isinstance(current, list):
raise TypeError('Wrong current api position {}'.format(current))
_node = self._apis # type: Union[ApiNode, ApiEndpoint]
for part in current:
if isinstance(_node, ApiNode):
_node = _node.into(part)
else:
raise WrongPath("""Can't enter into {} with part {}""".format(_node, part))
self._node = _node
self._current = current
if isinstance(self._node, ApiEndpoint):
self.__doc__ = self._node.render_docstring()
@staticmethod
def _make_apis_dict(apis) -> ApiNode:
node = ApiNode()
for api in apis:
cur_node = node
prev_part = ''
for part in api.parted_uri:
if cur_node.can_into(part):
_node = cur_node.into(part)
else:
try:
_node = cur_node.place(part, ApiNode())
except ParamAlreadyExist:
warnings.warn('Ignoring conflicting param. Don\'t use {}'.format(api.uri))
_node = cur_node.param
cur_node = _node # type: ignore
prev_part += '/' + part
cur_node.place(api.method, api)
return node
def _make_headers(self):
headers = {
'x-api-user': self._conf['login'],
'x-api-key': self._conf['password']
} if self._conf else {}
headers.update({'content-type': API_CONTENT_TYPE})
return headers
def __dir__(self):
return super().__dir__() + list(escape_keywords(self._node.keys()))
def __getattr__(self, val: str) -> Union[Any, 'Habitipy']:
if val in dir(super()):
# pylint: disable=no-member
return super().__getattr__(val) # type:ignore
val = val if not val.endswith('_') else val.rstrip('_')
val = val if '_' not in val else val.replace('_', '-')
return self.__class__(self._conf, apis=self._apis, current=self._current + [val])
def __getitem__(self, val: Union[str, List[str], Tuple[str, ...]]) -> 'Habitipy':
if isinstance(val, str):
return self.__class__(self._conf, apis=self._apis, current=self._current + [val])
if isinstance(val, (list, tuple)):
current = self._current + list(val)
return self.__class__(self._conf, apis=self._apis, current=current)
raise IndexError('{} not found in this API!'.format(val))
def _prepare_request(self, backend=requests, **kwargs):
uri = '/'.join([self._conf['url']] + self._current[:-1])
if not isinstance(self._node, ApiEndpoint):
raise ValueError('{} is not an endpoint!'.format(uri))
method = self._node.method
headers = self._make_headers()
query = {}
if 'query' in self._node.params:
for name, param in self._node.params['query'].items():
if name in kwargs:
query[name] = kwargs.pop(name)
elif not param.is_optional:
raise TypeError('Mandatory param {} is missing'.format(name))
request = getattr(backend, method)
request_args = (uri,)
request_kwargs = dict(headers=headers, params=query)
if method in ['put', 'post', 'delete']:
request_kwargs['data'] = json.dumps(kwargs)
return request, request_args, request_kwargs
def _request(self, request, request_args, request_kwargs):
res = request(*request_args, **request_kwargs)
if res.status_code != self._node.retcode:
res.raise_for_status()
msg = _("""
Got return code {res.status_code}, but {node.retcode} was
expected for {node.uri}. It may be a typo in Habitica apiDoc.
Please file an issue to https://github.com/HabitRPG/habitica/issues""")
msg = textwrap.dedent(msg)
msg = msg.replace('\n', ' ').format(res=res, node=self._node)
if self._strict:
raise WrongReturnCode(msg)
else:
warnings.warn(msg)
return res.json()['data']
def __call__(self, **kwargs) -> Union[Dict, List]:
return self._request(*self._prepare_request(**kwargs))
def download_api(branch=None) -> str:
"""download API documentation from _branch_ of Habitica\'s repo on Github"""
habitica_github_api = 'https://api.github.com/repos/HabitRPG/habitica'
if not branch:
branch = requests.get(habitica_github_api + '/releases/latest').json()['tag_name']
curl = local['curl']['-sL', habitica_github_api + '/tarball/{}'.format(branch)]
tar = local['tar'][
'axzf', '-', '--wildcards', '*/website/server/controllers/api-v3/*', '--to-stdout']
grep = local['grep']['@api']
sed = local['sed']['-e', 's/^[ */]*//g', '-e', 's/ / /g', '-']
return (curl | tar | grep | sed)()
def parse_apidoc(
file_or_branch,
from_github=False,
save_github_version=True
) -> List['ApiEndpoint']:
"""read file and parse apiDoc lines"""
apis = [] # type: List[ApiEndpoint]
regex = r'(?P<group>\([^)]*\)){0,1} *(?P<type_>{[^}]*}){0,1} *'
regex += r'(?P<field>[^ ]*) *(?P<description>.*)$'
param_regex = re.compile(r'^@apiParam {1,}' + regex)
success_regex = re.compile(r'^@apiSuccess {1,}' + regex)
if from_github:
text = download_api(file_or_branch)
if save_github_version:
save_apidoc(text)
else:
with open(file_or_branch) as f:
text = f.read()
for line in text.split('\n'):
line = line.replace('\n', '')
if line.startswith('@api '):
if apis:
if not apis[-1].retcode:
apis[-1].retcode = 200
split_line = line.split(' ')
assert len(split_line) >= 3
method = split_line[1]
uri = split_line[2]
assert method[0] == '{'
assert method[-1] == '}'
method = method[1:-1]
if not uri.startswith(API_URI_BASE):
warnings.warn(_("Wrong api url: {}").format(uri)) # noqa: Q000
title = ' '.join(split_line[3:])
apis.append(ApiEndpoint(method, uri, title))
elif line.startswith('@apiParam '):
res = next(param_regex.finditer(line)).groupdict()
apis[-1].add_param(**res)
elif line.startswith('@apiSuccess '):
res = next(success_regex.finditer(line)).groupdict()
apis[-1].add_success(**res)
if apis:
if not apis[-1].retcode:
apis[-1].retcode = 200
return apis
class ApiEndpoint:
"""
Represents a single api endpoint.
"""
def __init__(self, method, uri, title=''):
self.method = method
self.uri = uri
self.parted_uri = uri[1:].split('/')
self.title = title
self.params = defaultdict(dict)
self.retcode = None
def add_param(self, group=None, type_='', field='', description=''):
"""parse and append a param"""
group = group or '(Parameter)'
group = group.lower()[1:-1]
p = Param(type_, field, description)
self.params[group][p.field] = p
def add_success(self, group=None, type_='', field='', description=''):
"""parse and append a success data param"""
group = group or '(200)'
group = int(group.lower()[1:-1])
self.retcode = self.retcode or group
if group != self.retcode:
raise ValueError('Two or more retcodes!')
type_ = type_ or '{String}'
p = Param(type_, field, description)
self.params['responce'][p.field] = p
def __repr__(self):
return '<@api {{{self.method}}} {self.uri} {self.title}>'.format(self=self)
def render_docstring(self):
"""make a nice docstring for ipython"""
res = '{{{self.method}}} {self.uri} {self.title}\n'.format(self=self)
if self.params:
for group, params in self.params.items():
res += '\n' + group + ' params:\n'
for param in params.values():
res += param.render_docstring()
return res
# TODO: fix type checking
_valid_types = {
'string': lambda x: isinstance(x, str),
'sring': lambda x: isinstance(x, str),
'number': lambda x: isinstance(x, float),
'uuid': lambda u: isinstance(u, str) and u.replace('-', '') == uuid.UUID(u).hex
}
class Param:
"""represents param of request or responce"""
def __init__(self, type_, field, description):
self.is_optional = field[0] == '[' and field[-1] == ']'
self.field = field[1:-1] if self.is_optional else field
if '=' in self.field:
self.field, self.default = self.field.split('=')
else:
self.default = ''
self.field = self.field.split('.')
if len(self.field) > 1:
self.path, self.field = self.field[:-1], self.field[-1]
else:
self.field = self.field[0]
self.path = []
if type_:
self.type = type_[1:-1] if len(type_) > 2 else type_
if '=' in self.type:
self.type, self.possible_values = self.type.split('=')
self.possible_values = list(map(
lambda s: s if s[0] != '"' else s[1:-1],
self.possible_values.split(',')))
else:
self.possible_values = []
self.type = self.type.lower()
else:
self.type = None
self.possible_values = []
self.description = description
def validate(self, obj):
"""check if obj has this api param"""
if self.path:
for i in self.path:
obj = obj[i]
obj = obj[self.field]
raise NotImplementedError('Validation is not implemented yet')
def render_docstring(self):
"""make a nice docstring for ipython"""
default = (' = ' + str(self.default)) if self.default else ''
opt = 'optional' if self.is_optional else ''
can_be = ' '.join(self.possible_values) if self.possible_values else ''
can_be = 'one of [{}]'.format(can_be) if can_be else ''
type_ = 'of type "' + str(self.type) + '"'
res = ' '.join([opt, '"' + self.field + '"', default, type_, can_be, '\n'])
return res.replace(' ', ' ').lstrip()
|
ASMfreaK/habitipy | habitipy/api.py | parse_apidoc | python | def parse_apidoc(
file_or_branch,
from_github=False,
save_github_version=True
) -> List['ApiEndpoint']:
apis = [] # type: List[ApiEndpoint]
regex = r'(?P<group>\([^)]*\)){0,1} *(?P<type_>{[^}]*}){0,1} *'
regex += r'(?P<field>[^ ]*) *(?P<description>.*)$'
param_regex = re.compile(r'^@apiParam {1,}' + regex)
success_regex = re.compile(r'^@apiSuccess {1,}' + regex)
if from_github:
text = download_api(file_or_branch)
if save_github_version:
save_apidoc(text)
else:
with open(file_or_branch) as f:
text = f.read()
for line in text.split('\n'):
line = line.replace('\n', '')
if line.startswith('@api '):
if apis:
if not apis[-1].retcode:
apis[-1].retcode = 200
split_line = line.split(' ')
assert len(split_line) >= 3
method = split_line[1]
uri = split_line[2]
assert method[0] == '{'
assert method[-1] == '}'
method = method[1:-1]
if not uri.startswith(API_URI_BASE):
warnings.warn(_("Wrong api url: {}").format(uri)) # noqa: Q000
title = ' '.join(split_line[3:])
apis.append(ApiEndpoint(method, uri, title))
elif line.startswith('@apiParam '):
res = next(param_regex.finditer(line)).groupdict()
apis[-1].add_param(**res)
elif line.startswith('@apiSuccess '):
res = next(success_regex.finditer(line)).groupdict()
apis[-1].add_success(**res)
if apis:
if not apis[-1].retcode:
apis[-1].retcode = 200
return apis | read file and parse apiDoc lines | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L309-L353 | [
"def download_api(branch=None) -> str:\n \"\"\"download API documentation from _branch_ of Habitica\\'s repo on Github\"\"\"\n habitica_github_api = 'https://api.github.com/repos/HabitRPG/habitica'\n if not branch:\n branch = requests.get(habitica_github_api + '/releases/latest').json()['tag_name']\... | """
habitipy - tools and library for Habitica restful API
RESTful api abstraction module using requests
"""
# pylint: disable=invalid-name,too-few-public-methods,too-many-locals, bad-continuation
# pylint: disable=bad-whitespace
import json
import re
import uuid
from keyword import kwlist
import warnings
import textwrap
from collections import defaultdict
from typing import Dict, Union, List, Tuple, Iterator, Any, Optional
import pkg_resources
import requests
from plumbum import local
from .util import get_translation_functions
API_URI_BASE = '/api/v3'
API_CONTENT_TYPE = 'application/json'
APIDOC_LOCAL_FILE = '~/.config/habitipy/apidoc.txt'
_, ngettext = get_translation_functions('habitipy', names=('gettext', 'ngettext'))
class ParamAlreadyExist(ValueError):
"""Custom error type"""
class WrongReturnCode(ValueError):
"""Custom error type"""
class WrongData(ValueError):
"""Custom error type"""
class WrongPath(ValueError):
"""Custom error type"""
class ApiNode:
"""Represents a middle point in API"""
def __init__(self, param_name=None, param=None, paths=None):
self.param = param
self.param_name = param_name
self.paths = paths or {} # type: Dict[str, Union[ApiNode,ApiEndpoint]]
def into(self, val: str) -> Union['ApiNode', 'ApiEndpoint']:
"""Get another leaf node with name `val` if possible"""
if val in self.paths:
return self.paths[val]
if self.param:
return self.param
raise IndexError(_("Value {} is missing from api").format(val)) # NOQA: Q000
def can_into(self, val: str) -> bool:
"""Determine if there is a leaf node with name `val`"""
return val in self.paths or (self.param and self.param_name == val)
def place(self, part: str, val: Union['ApiNode', 'ApiEndpoint']):
"""place a leaf node"""
if part.startswith(':'):
if self.param and self.param != part:
err = """Cannot place param '{}' as '{self.param_name}' exist on node already!"""
raise ParamAlreadyExist(err.format(part, self=self))
self.param = val
self.param_name = part
return val
self.paths[part] = val
return val
def keys(self) -> Iterator[str]:
"""return all possible paths one can take from this ApiNode"""
if self.param:
yield self.param_name
yield from self.paths.keys()
def __repr__(self) -> str:
text = '<ApiNode {self.param_name}: {self.param} paths: {self.paths}>'
return text.format(self=self)
def is_param(self, val):
"""checks if val is this node's param"""
return val == self.param_name
def escape_keywords(arr):
"""append _ to all python keywords"""
for i in arr:
i = i if i not in kwlist else i + '_'
i = i if '-' not in i else i.replace('-', '_')
yield i
class Habitipy:
"""
Represents Habitica API
# Arguments
conf : Configuration dictionary for API. Should contain `url`, `login` and `password` fields
apis (None, List[ApiEndpoint], ApiNode): Field, representing API endpoints.
current : current position in the API
from_github : whether it is needed to download apiDoc from habitica's github
branch : branch to use to download apiDoc from habitica's github
strict : show warnings on inconsistent apiDocs
# Example
```python
from habitipy import Habitipy
conf = {
'url': 'https://habitica.com',
'login': 'your-login-uuid-to-replace',
'password': 'your-password-uuid-here'
api = Habitipy(conf)
print(api.user.get())
```
Interactive help:
```python
In [1]: from habitipy import Habitipy, load_conf,DEFAULT_CONF
In [2]: api = Habitipy(load_conf(DEFAULT_CONF))
In [3]: api.<tab>
api.approvals api.debug api.models api.tags
api.challenges api.group api.notifications api.tasks
api.content api.groups api.reorder-tags api.user
api.coupons api.hall api.shops
api.cron api.members api.status
In [84]: api.user.get?
Signature: api.user.get(**kwargs)
Type: Habitipy
String form: <habitipy.api.Habitipy object at 0x7fa6fd7966d8>
File: ~/projects/python/habitica/habitipy/api.py
Docstring:
{get} /api/v3/user Get the authenticated user's profile
responce params:
"data" of type "object"
```
From other Python consoles you can just run:
```python
>>> dir(api)
['__call__', '__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__',
'__format__', '__ge__', '__getattr__', '__getattribute__', '__getitem__', '__gt__',
'__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__',
'__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__',
'__subclasshook__', '__weakref__', '_apis', '_conf', '_current', '_is_request',
'_make_apis_dict', '_make_headers', '_node',
'approvals', 'challenges', 'content', 'coupons', 'cron', 'debug', 'group', 'groups', 'hall',
'members', 'models', 'notifications', 'reorder-tags',
'shops', 'status', 'tags', 'tasks', 'user']
>>> print(api.user.get.__doc__)
{get} /api/v3/user Get the authenticated user's profile
responce params:
"data" of type "object"
```
"""
def __init__(self, conf: Dict[str, str], *,
apis=None, current: Optional[List[str]] = None,
from_github=False, branch=None,
strict=False) -> None:
self._conf = conf
self._strict = strict
if isinstance(apis, (type(None), list)):
if not apis:
fn = local.path(APIDOC_LOCAL_FILE)
if not fn.exists():
fn = pkg_resources.resource_filename('habitipy', 'apidoc.txt')
fn = branch if from_github else fn
with warnings.catch_warnings():
warnings.simplefilter('error' if strict else 'ignore')
apis = parse_apidoc(fn, from_github)
with warnings.catch_warnings():
warnings.simplefilter('error' if strict else 'ignore')
apis = self._make_apis_dict(apis)
if isinstance(apis, ApiNode):
self._apis = apis
else:
raise TypeError('Possible apis {} have wrong type({})'.format(apis, type(apis)))
current = current or ['api', 'v3']
if not isinstance(current, list):
raise TypeError('Wrong current api position {}'.format(current))
_node = self._apis # type: Union[ApiNode, ApiEndpoint]
for part in current:
if isinstance(_node, ApiNode):
_node = _node.into(part)
else:
raise WrongPath("""Can't enter into {} with part {}""".format(_node, part))
self._node = _node
self._current = current
if isinstance(self._node, ApiEndpoint):
self.__doc__ = self._node.render_docstring()
@staticmethod
def _make_apis_dict(apis) -> ApiNode:
node = ApiNode()
for api in apis:
cur_node = node
prev_part = ''
for part in api.parted_uri:
if cur_node.can_into(part):
_node = cur_node.into(part)
else:
try:
_node = cur_node.place(part, ApiNode())
except ParamAlreadyExist:
warnings.warn('Ignoring conflicting param. Don\'t use {}'.format(api.uri))
_node = cur_node.param
cur_node = _node # type: ignore
prev_part += '/' + part
cur_node.place(api.method, api)
return node
def _make_headers(self):
headers = {
'x-api-user': self._conf['login'],
'x-api-key': self._conf['password']
} if self._conf else {}
headers.update({'content-type': API_CONTENT_TYPE})
return headers
def __dir__(self):
return super().__dir__() + list(escape_keywords(self._node.keys()))
def __getattr__(self, val: str) -> Union[Any, 'Habitipy']:
if val in dir(super()):
# pylint: disable=no-member
return super().__getattr__(val) # type:ignore
val = val if not val.endswith('_') else val.rstrip('_')
val = val if '_' not in val else val.replace('_', '-')
return self.__class__(self._conf, apis=self._apis, current=self._current + [val])
def __getitem__(self, val: Union[str, List[str], Tuple[str, ...]]) -> 'Habitipy':
if isinstance(val, str):
return self.__class__(self._conf, apis=self._apis, current=self._current + [val])
if isinstance(val, (list, tuple)):
current = self._current + list(val)
return self.__class__(self._conf, apis=self._apis, current=current)
raise IndexError('{} not found in this API!'.format(val))
def _prepare_request(self, backend=requests, **kwargs):
uri = '/'.join([self._conf['url']] + self._current[:-1])
if not isinstance(self._node, ApiEndpoint):
raise ValueError('{} is not an endpoint!'.format(uri))
method = self._node.method
headers = self._make_headers()
query = {}
if 'query' in self._node.params:
for name, param in self._node.params['query'].items():
if name in kwargs:
query[name] = kwargs.pop(name)
elif not param.is_optional:
raise TypeError('Mandatory param {} is missing'.format(name))
request = getattr(backend, method)
request_args = (uri,)
request_kwargs = dict(headers=headers, params=query)
if method in ['put', 'post', 'delete']:
request_kwargs['data'] = json.dumps(kwargs)
return request, request_args, request_kwargs
def _request(self, request, request_args, request_kwargs):
res = request(*request_args, **request_kwargs)
if res.status_code != self._node.retcode:
res.raise_for_status()
msg = _("""
Got return code {res.status_code}, but {node.retcode} was
expected for {node.uri}. It may be a typo in Habitica apiDoc.
Please file an issue to https://github.com/HabitRPG/habitica/issues""")
msg = textwrap.dedent(msg)
msg = msg.replace('\n', ' ').format(res=res, node=self._node)
if self._strict:
raise WrongReturnCode(msg)
else:
warnings.warn(msg)
return res.json()['data']
def __call__(self, **kwargs) -> Union[Dict, List]:
return self._request(*self._prepare_request(**kwargs))
def download_api(branch=None) -> str:
"""download API documentation from _branch_ of Habitica\'s repo on Github"""
habitica_github_api = 'https://api.github.com/repos/HabitRPG/habitica'
if not branch:
branch = requests.get(habitica_github_api + '/releases/latest').json()['tag_name']
curl = local['curl']['-sL', habitica_github_api + '/tarball/{}'.format(branch)]
tar = local['tar'][
'axzf', '-', '--wildcards', '*/website/server/controllers/api-v3/*', '--to-stdout']
grep = local['grep']['@api']
sed = local['sed']['-e', 's/^[ */]*//g', '-e', 's/ / /g', '-']
return (curl | tar | grep | sed)()
def save_apidoc(text: str) -> None:
"""save `text` to apidoc cache"""
apidoc_local = local.path(APIDOC_LOCAL_FILE)
if not apidoc_local.dirname.exists():
apidoc_local.dirname.mkdir()
with open(apidoc_local, 'w') as f:
f.write(text)
class ApiEndpoint:
"""
Represents a single api endpoint.
"""
def __init__(self, method, uri, title=''):
self.method = method
self.uri = uri
self.parted_uri = uri[1:].split('/')
self.title = title
self.params = defaultdict(dict)
self.retcode = None
def add_param(self, group=None, type_='', field='', description=''):
"""parse and append a param"""
group = group or '(Parameter)'
group = group.lower()[1:-1]
p = Param(type_, field, description)
self.params[group][p.field] = p
def add_success(self, group=None, type_='', field='', description=''):
"""parse and append a success data param"""
group = group or '(200)'
group = int(group.lower()[1:-1])
self.retcode = self.retcode or group
if group != self.retcode:
raise ValueError('Two or more retcodes!')
type_ = type_ or '{String}'
p = Param(type_, field, description)
self.params['responce'][p.field] = p
def __repr__(self):
return '<@api {{{self.method}}} {self.uri} {self.title}>'.format(self=self)
def render_docstring(self):
"""make a nice docstring for ipython"""
res = '{{{self.method}}} {self.uri} {self.title}\n'.format(self=self)
if self.params:
for group, params in self.params.items():
res += '\n' + group + ' params:\n'
for param in params.values():
res += param.render_docstring()
return res
# TODO: fix type checking
_valid_types = {
'string': lambda x: isinstance(x, str),
'sring': lambda x: isinstance(x, str),
'number': lambda x: isinstance(x, float),
'uuid': lambda u: isinstance(u, str) and u.replace('-', '') == uuid.UUID(u).hex
}
class Param:
"""represents param of request or responce"""
def __init__(self, type_, field, description):
self.is_optional = field[0] == '[' and field[-1] == ']'
self.field = field[1:-1] if self.is_optional else field
if '=' in self.field:
self.field, self.default = self.field.split('=')
else:
self.default = ''
self.field = self.field.split('.')
if len(self.field) > 1:
self.path, self.field = self.field[:-1], self.field[-1]
else:
self.field = self.field[0]
self.path = []
if type_:
self.type = type_[1:-1] if len(type_) > 2 else type_
if '=' in self.type:
self.type, self.possible_values = self.type.split('=')
self.possible_values = list(map(
lambda s: s if s[0] != '"' else s[1:-1],
self.possible_values.split(',')))
else:
self.possible_values = []
self.type = self.type.lower()
else:
self.type = None
self.possible_values = []
self.description = description
def validate(self, obj):
"""check if obj has this api param"""
if self.path:
for i in self.path:
obj = obj[i]
obj = obj[self.field]
raise NotImplementedError('Validation is not implemented yet')
def render_docstring(self):
"""make a nice docstring for ipython"""
default = (' = ' + str(self.default)) if self.default else ''
opt = 'optional' if self.is_optional else ''
can_be = ' '.join(self.possible_values) if self.possible_values else ''
can_be = 'one of [{}]'.format(can_be) if can_be else ''
type_ = 'of type "' + str(self.type) + '"'
res = ' '.join([opt, '"' + self.field + '"', default, type_, can_be, '\n'])
return res.replace(' ', ' ').lstrip()
|
ASMfreaK/habitipy | habitipy/api.py | ApiNode.into | python | def into(self, val: str) -> Union['ApiNode', 'ApiEndpoint']:
if val in self.paths:
return self.paths[val]
if self.param:
return self.param
raise IndexError(_("Value {} is missing from api").format(val)) | Get another leaf node with name `val` if possible | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L52-L58 | null | class ApiNode:
"""Represents a middle point in API"""
def __init__(self, param_name=None, param=None, paths=None):
self.param = param
self.param_name = param_name
self.paths = paths or {} # type: Dict[str, Union[ApiNode,ApiEndpoint]]
# NOQA: Q000
def can_into(self, val: str) -> bool:
"""Determine if there is a leaf node with name `val`"""
return val in self.paths or (self.param and self.param_name == val)
def place(self, part: str, val: Union['ApiNode', 'ApiEndpoint']):
"""place a leaf node"""
if part.startswith(':'):
if self.param and self.param != part:
err = """Cannot place param '{}' as '{self.param_name}' exist on node already!"""
raise ParamAlreadyExist(err.format(part, self=self))
self.param = val
self.param_name = part
return val
self.paths[part] = val
return val
def keys(self) -> Iterator[str]:
"""return all possible paths one can take from this ApiNode"""
if self.param:
yield self.param_name
yield from self.paths.keys()
def __repr__(self) -> str:
text = '<ApiNode {self.param_name}: {self.param} paths: {self.paths}>'
return text.format(self=self)
def is_param(self, val):
"""checks if val is this node's param"""
return val == self.param_name
|
ASMfreaK/habitipy | habitipy/api.py | ApiNode.can_into | python | def can_into(self, val: str) -> bool:
return val in self.paths or (self.param and self.param_name == val) | Determine if there is a leaf node with name `val` | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L60-L62 | null | class ApiNode:
"""Represents a middle point in API"""
def __init__(self, param_name=None, param=None, paths=None):
self.param = param
self.param_name = param_name
self.paths = paths or {} # type: Dict[str, Union[ApiNode,ApiEndpoint]]
def into(self, val: str) -> Union['ApiNode', 'ApiEndpoint']:
"""Get another leaf node with name `val` if possible"""
if val in self.paths:
return self.paths[val]
if self.param:
return self.param
raise IndexError(_("Value {} is missing from api").format(val)) # NOQA: Q000
def place(self, part: str, val: Union['ApiNode', 'ApiEndpoint']):
"""place a leaf node"""
if part.startswith(':'):
if self.param and self.param != part:
err = """Cannot place param '{}' as '{self.param_name}' exist on node already!"""
raise ParamAlreadyExist(err.format(part, self=self))
self.param = val
self.param_name = part
return val
self.paths[part] = val
return val
def keys(self) -> Iterator[str]:
"""return all possible paths one can take from this ApiNode"""
if self.param:
yield self.param_name
yield from self.paths.keys()
def __repr__(self) -> str:
text = '<ApiNode {self.param_name}: {self.param} paths: {self.paths}>'
return text.format(self=self)
def is_param(self, val):
"""checks if val is this node's param"""
return val == self.param_name
|
ASMfreaK/habitipy | habitipy/api.py | ApiNode.place | python | def place(self, part: str, val: Union['ApiNode', 'ApiEndpoint']):
if part.startswith(':'):
if self.param and self.param != part:
err = """Cannot place param '{}' as '{self.param_name}' exist on node already!"""
raise ParamAlreadyExist(err.format(part, self=self))
self.param = val
self.param_name = part
return val
self.paths[part] = val
return val | place a leaf node | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L64-L74 | null | class ApiNode:
"""Represents a middle point in API"""
def __init__(self, param_name=None, param=None, paths=None):
self.param = param
self.param_name = param_name
self.paths = paths or {} # type: Dict[str, Union[ApiNode,ApiEndpoint]]
def into(self, val: str) -> Union['ApiNode', 'ApiEndpoint']:
"""Get another leaf node with name `val` if possible"""
if val in self.paths:
return self.paths[val]
if self.param:
return self.param
raise IndexError(_("Value {} is missing from api").format(val)) # NOQA: Q000
def can_into(self, val: str) -> bool:
"""Determine if there is a leaf node with name `val`"""
return val in self.paths or (self.param and self.param_name == val)
def keys(self) -> Iterator[str]:
"""return all possible paths one can take from this ApiNode"""
if self.param:
yield self.param_name
yield from self.paths.keys()
def __repr__(self) -> str:
text = '<ApiNode {self.param_name}: {self.param} paths: {self.paths}>'
return text.format(self=self)
def is_param(self, val):
"""checks if val is this node's param"""
return val == self.param_name
|
ASMfreaK/habitipy | habitipy/api.py | ApiNode.keys | python | def keys(self) -> Iterator[str]:
if self.param:
yield self.param_name
yield from self.paths.keys() | return all possible paths one can take from this ApiNode | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L76-L80 | null | class ApiNode:
"""Represents a middle point in API"""
def __init__(self, param_name=None, param=None, paths=None):
self.param = param
self.param_name = param_name
self.paths = paths or {} # type: Dict[str, Union[ApiNode,ApiEndpoint]]
def into(self, val: str) -> Union['ApiNode', 'ApiEndpoint']:
"""Get another leaf node with name `val` if possible"""
if val in self.paths:
return self.paths[val]
if self.param:
return self.param
raise IndexError(_("Value {} is missing from api").format(val)) # NOQA: Q000
def can_into(self, val: str) -> bool:
"""Determine if there is a leaf node with name `val`"""
return val in self.paths or (self.param and self.param_name == val)
def place(self, part: str, val: Union['ApiNode', 'ApiEndpoint']):
"""place a leaf node"""
if part.startswith(':'):
if self.param and self.param != part:
err = """Cannot place param '{}' as '{self.param_name}' exist on node already!"""
raise ParamAlreadyExist(err.format(part, self=self))
self.param = val
self.param_name = part
return val
self.paths[part] = val
return val
def __repr__(self) -> str:
text = '<ApiNode {self.param_name}: {self.param} paths: {self.paths}>'
return text.format(self=self)
def is_param(self, val):
"""checks if val is this node's param"""
return val == self.param_name
|
ASMfreaK/habitipy | habitipy/api.py | ApiEndpoint.add_param | python | def add_param(self, group=None, type_='', field='', description=''):
group = group or '(Parameter)'
group = group.lower()[1:-1]
p = Param(type_, field, description)
self.params[group][p.field] = p | parse and append a param | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L368-L373 | null | class ApiEndpoint:
"""
Represents a single api endpoint.
"""
def __init__(self, method, uri, title=''):
self.method = method
self.uri = uri
self.parted_uri = uri[1:].split('/')
self.title = title
self.params = defaultdict(dict)
self.retcode = None
def add_success(self, group=None, type_='', field='', description=''):
"""parse and append a success data param"""
group = group or '(200)'
group = int(group.lower()[1:-1])
self.retcode = self.retcode or group
if group != self.retcode:
raise ValueError('Two or more retcodes!')
type_ = type_ or '{String}'
p = Param(type_, field, description)
self.params['responce'][p.field] = p
def __repr__(self):
return '<@api {{{self.method}}} {self.uri} {self.title}>'.format(self=self)
def render_docstring(self):
"""make a nice docstring for ipython"""
res = '{{{self.method}}} {self.uri} {self.title}\n'.format(self=self)
if self.params:
for group, params in self.params.items():
res += '\n' + group + ' params:\n'
for param in params.values():
res += param.render_docstring()
return res
|
ASMfreaK/habitipy | habitipy/api.py | ApiEndpoint.add_success | python | def add_success(self, group=None, type_='', field='', description=''):
group = group or '(200)'
group = int(group.lower()[1:-1])
self.retcode = self.retcode or group
if group != self.retcode:
raise ValueError('Two or more retcodes!')
type_ = type_ or '{String}'
p = Param(type_, field, description)
self.params['responce'][p.field] = p | parse and append a success data param | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L375-L384 | null | class ApiEndpoint:
"""
Represents a single api endpoint.
"""
def __init__(self, method, uri, title=''):
self.method = method
self.uri = uri
self.parted_uri = uri[1:].split('/')
self.title = title
self.params = defaultdict(dict)
self.retcode = None
def add_param(self, group=None, type_='', field='', description=''):
"""parse and append a param"""
group = group or '(Parameter)'
group = group.lower()[1:-1]
p = Param(type_, field, description)
self.params[group][p.field] = p
def __repr__(self):
return '<@api {{{self.method}}} {self.uri} {self.title}>'.format(self=self)
def render_docstring(self):
"""make a nice docstring for ipython"""
res = '{{{self.method}}} {self.uri} {self.title}\n'.format(self=self)
if self.params:
for group, params in self.params.items():
res += '\n' + group + ' params:\n'
for param in params.values():
res += param.render_docstring()
return res
|
ASMfreaK/habitipy | habitipy/api.py | ApiEndpoint.render_docstring | python | def render_docstring(self):
res = '{{{self.method}}} {self.uri} {self.title}\n'.format(self=self)
if self.params:
for group, params in self.params.items():
res += '\n' + group + ' params:\n'
for param in params.values():
res += param.render_docstring()
return res | make a nice docstring for ipython | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L389-L397 | null | class ApiEndpoint:
"""
Represents a single api endpoint.
"""
def __init__(self, method, uri, title=''):
self.method = method
self.uri = uri
self.parted_uri = uri[1:].split('/')
self.title = title
self.params = defaultdict(dict)
self.retcode = None
def add_param(self, group=None, type_='', field='', description=''):
"""parse and append a param"""
group = group or '(Parameter)'
group = group.lower()[1:-1]
p = Param(type_, field, description)
self.params[group][p.field] = p
def add_success(self, group=None, type_='', field='', description=''):
"""parse and append a success data param"""
group = group or '(200)'
group = int(group.lower()[1:-1])
self.retcode = self.retcode or group
if group != self.retcode:
raise ValueError('Two or more retcodes!')
type_ = type_ or '{String}'
p = Param(type_, field, description)
self.params['responce'][p.field] = p
def __repr__(self):
return '<@api {{{self.method}}} {self.uri} {self.title}>'.format(self=self)
|
ASMfreaK/habitipy | habitipy/api.py | Param.validate | python | def validate(self, obj):
if self.path:
for i in self.path:
obj = obj[i]
obj = obj[self.field]
raise NotImplementedError('Validation is not implemented yet') | check if obj has this api param | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L439-L446 | null | class Param:
"""represents param of request or responce"""
def __init__(self, type_, field, description):
self.is_optional = field[0] == '[' and field[-1] == ']'
self.field = field[1:-1] if self.is_optional else field
if '=' in self.field:
self.field, self.default = self.field.split('=')
else:
self.default = ''
self.field = self.field.split('.')
if len(self.field) > 1:
self.path, self.field = self.field[:-1], self.field[-1]
else:
self.field = self.field[0]
self.path = []
if type_:
self.type = type_[1:-1] if len(type_) > 2 else type_
if '=' in self.type:
self.type, self.possible_values = self.type.split('=')
self.possible_values = list(map(
lambda s: s if s[0] != '"' else s[1:-1],
self.possible_values.split(',')))
else:
self.possible_values = []
self.type = self.type.lower()
else:
self.type = None
self.possible_values = []
self.description = description
def render_docstring(self):
"""make a nice docstring for ipython"""
default = (' = ' + str(self.default)) if self.default else ''
opt = 'optional' if self.is_optional else ''
can_be = ' '.join(self.possible_values) if self.possible_values else ''
can_be = 'one of [{}]'.format(can_be) if can_be else ''
type_ = 'of type "' + str(self.type) + '"'
res = ' '.join([opt, '"' + self.field + '"', default, type_, can_be, '\n'])
return res.replace(' ', ' ').lstrip()
|
ASMfreaK/habitipy | habitipy/api.py | Param.render_docstring | python | def render_docstring(self):
default = (' = ' + str(self.default)) if self.default else ''
opt = 'optional' if self.is_optional else ''
can_be = ' '.join(self.possible_values) if self.possible_values else ''
can_be = 'one of [{}]'.format(can_be) if can_be else ''
type_ = 'of type "' + str(self.type) + '"'
res = ' '.join([opt, '"' + self.field + '"', default, type_, can_be, '\n'])
return res.replace(' ', ' ').lstrip() | make a nice docstring for ipython | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L448-L456 | null | class Param:
"""represents param of request or responce"""
def __init__(self, type_, field, description):
self.is_optional = field[0] == '[' and field[-1] == ']'
self.field = field[1:-1] if self.is_optional else field
if '=' in self.field:
self.field, self.default = self.field.split('=')
else:
self.default = ''
self.field = self.field.split('.')
if len(self.field) > 1:
self.path, self.field = self.field[:-1], self.field[-1]
else:
self.field = self.field[0]
self.path = []
if type_:
self.type = type_[1:-1] if len(type_) > 2 else type_
if '=' in self.type:
self.type, self.possible_values = self.type.split('=')
self.possible_values = list(map(
lambda s: s if s[0] != '"' else s[1:-1],
self.possible_values.split(',')))
else:
self.possible_values = []
self.type = self.type.lower()
else:
self.type = None
self.possible_values = []
self.description = description
def validate(self, obj):
"""check if obj has this api param"""
if self.path:
for i in self.path:
obj = obj[i]
obj = obj[self.field]
raise NotImplementedError('Validation is not implemented yet')
|
ASMfreaK/habitipy | habitipy/cli.py | is_uuid | python | def is_uuid(u):
if isinstance(u, str) and u.replace('-', '') == uuid.UUID(u).hex:
return u
return False | validator for plumbum prompt | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/cli.py#L48-L52 | null | """
habitipy - tools and library for Habitica restful API
command-line interface library using plumbum
"""
# pylint: disable=arguments-differ, attribute-defined-outside-init,ungrouped-imports
# pylint: disable=invalid-name, logging-format-interpolation,too-few-public-methods
import warnings
import logging
import os
import json
import uuid
from bisect import bisect
from textwrap import dedent
from typing import List, Union, Dict, Any # pylint: disable=unused-import
import pkg_resources
from plumbum import local, cli, colors
import requests
from .api import Habitipy
from .util import assert_secure_file, secure_filestore
from .util import get_translation_functions, get_translation_for
from .util import prettify
try:
from json import JSONDecodeError # type: ignore
except ImportError:
JSONDecodeError = ValueError # type: ignore
DEFAULT_CONF = '~/.config/habitipy/config'
SUBCOMMANDS_JSON = '~/.config/habitipy/subcommands.json'
CONTENT_JSON = local.path('~/.config/habitipy/content.json')
_, ngettext = get_translation_functions('habitipy', names=('gettext', 'ngettext'))
CLASSES = [_("warrior"), _("rogue"), _("wizard"), _("healer")] # noqa: Q000
YES_ANSWERS = ('yes', 'y', 'true', 'True', '1')
CHECK_MARK_STYLES = ('wide', 'narrow', 'ascii')
CHECK = {
'wide': colors.green | '✔ ',
'narrow': colors.green | '✔',
'ascii': '[X]'
}
UNCHECK = {
'wide': colors.red | '✖ ',
'narrow': colors.red | '✖',
'ascii': '[ ]'
}
def load_conf(configfile, config=None):
"""Get authentication data from the AUTH_CONF file."""
default_login = 'your-login-for-api-here'
default_password = 'your-password-for-api-here'
config = config or {}
configfile = local.path(configfile)
if not configfile.exists():
configfile.dirname.mkdir()
else:
assert_secure_file(configfile)
with secure_filestore(), cli.Config(configfile) as conf:
config['url'] = conf.get('habitipy.url', 'https://habitica.com')
config['login'] = conf.get('habitipy.login', default_login)
config['password'] = conf.get('habitipy.password', default_password)
if config['login'] == default_login or config['password'] == default_password:
if cli.terminal.ask(
_("""Your creditentials are invalid. Do you want to enter them now?"""),
default=True):
msg = _("""
You can get your login information at
https://habitica.com/#/options/settings/api
Both your user id and API token should look like this:
xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
where 'x' is a number between 0-9 or a character a-f.
""")
print(dedent(msg))
msg = _("""Please enter your login (user ID)""")
config['login'] = cli.terminal.prompt(msg, validator=is_uuid)
msg = _("""Please enter your password (API token)""")
config['password'] = cli.terminal.prompt(msg, validator=is_uuid)
conf.set('habitipy.login', config['login'])
conf.set('habitipy.password', config['password'])
print(dedent(_("""
Your creditentials are securely stored in
{configfile}
You can edit that file later if you need.
""")).format(configfile=configfile))
config['show_numbers'] = conf.get('habitipy.show_numbers', 'y')
config['show_numbers'] = config['show_numbers'] in YES_ANSWERS
config['show_style'] = conf.get('habitipy.show_style', 'wide')
if config['show_style'] not in CHECK_MARK_STYLES:
config['show_style'] = 'wide'
return config
class ConfiguredApplication(cli.Application):
"""Application with config"""
config_filename = cli.SwitchAttr(
['-c', '--config'], argtype=local.path, default=DEFAULT_CONF,
argname='CONFIG',
help=_("Use file CONFIG for config")) # noqa: Q000
verbose = cli.Flag(
['-v', '--verbose'],
help=_("Verbose output - log everything."), # noqa: Q000
excludes=['-s', '--silent'])
silence_level = cli.CountOf(
['-s', '--silent'],
help=_("Make program more silent"), # noqa: Q000
excludes=['-v', '--verbose'])
def main(self):
self.config = load_conf(self.config_filename)
self.log = logging.getLogger(str(self.__class__).split("'")[1])
self.log.addHandler(logging.StreamHandler())
if self.verbose:
self.log.setLevel(logging.DEBUG)
else:
base_level = logging.INFO
self.log.setLevel(base_level + 10 * self.silence_level)
def get_content(api, rebuild_cache=False):
"""get content from server or cache"""
if hasattr(get_content, 'cache') and not rebuild_cache:
return get_content.cache
if not os.path.exists(CONTENT_JSON) or rebuild_cache:
import locale
content_endpoint = api.content.get
# pylint: disable=protected-access
try_langs = []
try:
lang = get_translation_for('habitipy').info()['language']
try_langs.append(lang)
except KeyError:
pass
try:
loc = locale.getdefaultlocale()[0]
if loc:
try_langs.append(loc)
try_langs.append(loc[:2])
except IndexError:
pass
server_lang = content_endpoint._node.params['query']['language']
# handle something like 'ru_RU' not available - only 'ru'
for lang in try_langs:
if lang in server_lang.possible_values:
loc = {'language': lang}
break
else:
loc = {}
get_content.cache = content = content_endpoint(**loc)
with open(CONTENT_JSON, 'w') as f:
json.dump(content, f)
return content
try:
with open(CONTENT_JSON) as f:
get_content.cache = content = json.load(f)
return content
except JSONDecodeError:
return get_content(api, rebuild_cache=True)
class ApplicationWithApi(ConfiguredApplication):
"""Application with configured Habitica API"""
api = None # type: Habitipy
def main(self):
super().main()
self.api = Habitipy(self.config)
class HabiticaCli(ConfiguredApplication): # pylint: disable=missing-docstring
DESCRIPTION = _("tools and library for Habitica restful API") # noqa: Q000
VERSION = pkg_resources.get_distribution('habitipy').version
def main(self):
if self.nested_command:
return
super().main()
self.log.error(_("No subcommand given, exiting")) # noqa: Q000
@HabiticaCli.subcommand('status') # pylint: disable=missing-docstring
class Status(ApplicationWithApi):
DESCRIPTION = _("Show HP, XP, GP, and more") # noqa: Q000
def main(self):
super().main()
user = self.api.user.get()
for key in ['hp', 'mp', 'exp']:
user['stats'][key] = round(user['stats'][key])
user['stats']['class'] = _(user['stats']['class']).capitalize()
user['food'] = sum(user['items']['food'].values())
content = get_content(self.api)
user['pet'] = user['items']['currentPet'] if 'currentPet' in user['items'] else None
user['pet'] = content['petInfo'][user['pet']]['text'] if user['pet'] else ''
user['pet'] = _("Pet: ") + user['pet'] if user['pet'] else _("No pet") # noqa: Q000
user['mount'] = user['items'].get('currentMount', None)
user['mount'] = content['mountInfo'][user['mount']]['text'] if user['mount'] else ''
if user['mount']:
user['mount'] = _("Mount: ") + user['mount'] # noqa: Q000
else:
user['mount'] = _("No mount") # noqa: Q000
level = _("\nLevel {stats[lvl]} {stats[class]}\n").format(**user) # noqa: Q000
highlight = '-' * (len(level) - 2)
level = highlight + level + highlight
result = [
level,
colors.red | _("Health: {stats[hp]}/{stats[maxHealth]}"), # noqa: Q000
colors.yellow | _("XP: {stats[exp]}/{stats[toNextLevel]}"), # noqa: Q000
colors.blue | _("Mana: {stats[mp]}/{stats[maxMP]}"), # noqa: Q000
colors.light_yellow | _("GP: {stats[gp]:.2f}"), # noqa: Q000
'{pet} ' + ngettext(
"({food} food item)", # noqa: Q000
"({food} food items)", # noqa: Q000
user['food']),
'{mount}']
quest = self.quest_info(user)
if quest:
result.append(quest)
print('\n'.join(result).format(**user))
def quest_info(self, user):
"""Get current quest info or return None"""
key = user['party']['quest'].get('key', None)
if '_id' not in user['party'] or key is None:
return None
for refresh in False, True:
content = get_content(self.api, refresh)
quest = content['quests'].get(key, None)
if quest:
break
else:
self.log.warning(dedent(_(
"""Quest {} not found in Habitica's content.
Please file an issue to https://github.com/ASMfreaK/habitipy/issues
""")).format(key))
return None
for quest_type, quest_template in (
('collect', _("""
Quest: {quest[text]} (collect-type)
{user[party][quest][progress][collectedItems]} quest items collected
""")),
('boss', _("""
Quest: {quest[text]} (boss)
{user[party][quest][progress][up]:.1f} damage will be dealt to {quest[boss][name]}
"""))):
if quest_type in quest:
try:
return dedent(quest_template.format(quest=quest, user=user))[1:-1]
except KeyError:
self.log.warning(dedent(_(
"""Something went wrong when formatting quest {}.
Please file an issue to https://github.com/ASMfreaK/habitipy/issues
""")).format(key))
return None
self.log.warning(dedent(_(
"""Quest {} isn't neither a collect-type or a boss-type.
Please file an issue to https://github.com/ASMfreaK/habitipy/issues
""")).format(key))
class ScoreInfo:
"""task value/score info: http://habitica.wikia.com/wiki/Task_Value"""
scores = {
'wide': ['▁', '▂', '▃', '▄', '▅', '▆', '▇'],
'narrow': ['▁', '▂', '▃', '▄', '▅', '▆', '▇'],
'ascii': ['*', '**', '***', '****', '*****', '******', '*******']
}
colors_ = ['Red3', 'Red1', 'DarkOrange', 'Gold3A', 'Green', 'LightCyan3', 'Cyan1']
breakpoints = [-20, -10, -1, 1, 5, 10]
def __new__(cls, style, value):
index = bisect(cls.breakpoints, value)
score = cls.scores[style][index]
score_col = colors.fg(cls.colors_[index])
if style == 'ascii':
max_scores_len = max(map(len, cls.scores[style]))
score = '[' + score.center(max_scores_len) + ']'
# score = '⎡' + score.center(cls.max_scores_len) + '⎤'
return score_col | score
@classmethod
def color(cls, value):
"""task value/score color"""
index = bisect(cls.breakpoints, value)
return colors.fg(cls.colors_[index])
class TasksPrint(ApplicationWithApi):
"""Put all tasks from `domain` to print"""
domain = '' # type: str
more_tasks = [] # type: List[Dict[str, Any]]
def domain_format(self, task):
"""format task for domain"""
raise NotImplementedError()
def main(self):
if self.nested_command:
return
super().main()
tasks = self.api.tasks.user.get(type=self.domain)
tasks.extend(self.more_tasks)
habits_len = len(tasks)
ident_size = len(str(habits_len)) + 2
number_format = '{{:{}d}}. '.format(ident_size - 2)
for i, task in enumerate(tasks):
i = number_format.format(i + 1) if self.config['show_numbers'] else ''
res = i + prettify(self.domain_format(task))
print(res)
@HabiticaCli.subcommand('habits') # pylint: disable=missing-docstring
class Habits(TasksPrint):
DESCRIPTION = _("List, up and down habit tasks") # noqa: Q000
domain = 'habits'
def domain_format(self, habit):
score = ScoreInfo(self.config['show_style'], habit['value'])
return _("{0} {text}").format(score, **habit) # noqa: Q000
@HabiticaCli.subcommand('dailies') # pylint: disable=missing-docstring
class Dailys(TasksPrint):
DESCRIPTION = _("List, check, uncheck daily tasks") # noqa: Q000
domain = 'dailys'
def domain_format(self, daily):
score = ScoreInfo(self.config['show_style'], daily['value'])
check = CHECK if daily['completed'] else UNCHECK
check = check[self.config['show_style']]
checklist_done = len(list(filter(lambda x: x['completed'], daily['checklist'])))
checklist = \
' {}/{}'.format(
checklist_done,
len(daily['checklist'])
) if daily['checklist'] else ''
res = _("{0}{1}{text}{2}").format(check, score, checklist, **daily) # noqa: Q000
if not daily['isDue']:
res = colors.strikeout + colors.dark_gray | res
return res
@HabiticaCli.subcommand('todos') # pylint: disable=missing-docstring
class ToDos(TasksPrint):
DESCRIPTION = _("List, comlete, add or delete todo tasks") # noqa: Q000
domain = 'todos'
def domain_format(self, todo):
score = ScoreInfo(self.config['show_style'], todo['value'])
check = CHECK if todo['completed'] else UNCHECK
check = check[self.config['show_style']]
checklist_done = len(list(filter(lambda x: x['completed'], todo['checklist'])))
checklist = \
' {}/{}'.format(
checklist_done,
len(todo['checklist'])
) if todo['checklist'] else ''
res = _("{1}{0}{text}{2}").format(check, score, checklist, **todo) # noqa: Q000
return res
def get_additional_rewards(api):
"""returns list of non-user rewards (potion, armoire, gear)"""
c = get_content(api)
tasks = [c[i] for i in ['potion', 'armoire']]
tasks.extend(api.user.inventory.buy.get())
for task in tasks:
task['id'] = task['alias'] = task['key']
return tasks
@HabiticaCli.subcommand('rewards') # pylint: disable=missing-docstring
class Rewards(TasksPrint):
DESCRIPTION = _("List, buy and add rewards") # noqa: Q000
domain = 'rewards'
def main(self):
if self.nested_command:
return
ApplicationWithApi.main(self)
self.more_tasks = get_additional_rewards(self.api)
super().main()
def domain_format(self, reward):
score = colors.yellow | _("{value} gp").format(**reward) # noqa: Q000
return _("{} {text}").format(score, **reward) # noqa: Q000
class TaskId(List[Union[str, int]]):
"""
handle task-id formats such as:
habitica todos done 3 taskalias_or_uuid
habitica todos done 1,2,3,taskalias_or_uuid
habitica todos done 2 3
habitica todos done 1-3,4 8
"""
def __new__(cls, tids: str):
task_ids = [] # type: List[Union[str, int]]
for bit in tids.split(','):
try:
if '-' in bit:
start, stop = [int(e) for e in bit.split('-')]
task_ids.extend(range(start, stop + 1))
else:
task_ids.append(int(bit))
except ValueError:
task_ids.append(bit)
return [e - 1 if isinstance(e, int) else e for e in task_ids] # type: ignore
class TasksChange(ApplicationWithApi):
"""find all tasks specified by user and do self.op on them"""
domain = '' # type: str
noop = cli.Flag(
['--dry-run', '--noop'],
help=_("If passed, won't actually change anything on habitipy server"), # noqa: Q000
default=False)
more_tasks = [] # type: List[Dict[str, Any]]
ids_can_overlap = False
NO_TASK_ID = _("No task_ids found!") # noqa: Q000
TASK_ID_INVALID = _("Task id {} is invalid") # noqa: Q000
PARSED_TASK_IDS = _("Parsed task ids {}") # noqa: Q000
def main(self, *task_ids: TaskId): # type: ignore
super().main()
task_id = [] # type: List[Union[str,int]]
for tids in task_ids:
task_id.extend(tids)
if not task_id:
self.log.error(self.NO_TASK_ID)
return 1
tasks = self.api.tasks.user.get(type=self.domain)
assert isinstance(tasks, list)
tasks.extend(self.more_tasks)
task_uuids = [task['id'] for task in tasks]
num_tasks = len(tasks)
aliases = {task['alias']: task for task in tasks if 'alias' in task}
self.changing_tasks = {} # type: Dict[Union[str], Dict[str, Any]]
changing_tasks_ids = [] # type: List[str]
for tid in task_id:
if isinstance(tid, int):
if 0 <= tid <= num_tasks:
changing_tasks_ids.append(task_uuids[tid])
self.changing_tasks[task_uuids[tid]] = tasks[tid]
continue
elif isinstance(tid, str):
if tid in task_uuids:
changing_tasks_ids.append(tid)
self.changing_tasks[tid] = tasks[task_uuids.index(tid)]
continue
elif tid in aliases:
t_id = aliases[tid]['id']
changing_tasks_ids.append(t_id)
self.changing_tasks[t_id] = aliases[tid]
continue
self.log.error(self.TASK_ID_INVALID.format(tid))
return 1
idstr = ' '.join(self.changing_tasks.keys())
self.log.info(self.PARSED_TASK_IDS.format(idstr)) # noqa: Q000
self.tasks = self.api.tasks
if not self.ids_can_overlap:
changing_tasks_ids = list(set(changing_tasks_ids))
for tid in changing_tasks_ids:
if not self.noop:
self.op(tid)
res = self.log_op(tid)
print(prettify(res))
self.domain_print()
def validate(self, task): # pylint: disable=no-self-use,unused-argument
"""check if task is valid for the operation"""
return True
def op(self, tid):
"""operation to be done on task with `tid`"""
raise NotImplementedError
def log_op(self, tid):
"""return a message to show user on successful change of `tid`"""
raise NotImplementedError
def domain_print(self):
"""show domain to user again"""
raise NotImplementedError
class HabitsChange(TasksChange): # pylint: disable=missing-docstring,abstract-method
domain = 'habits'
ids_can_overlap = True
def domain_print(self):
Habits.invoke(config_filename=self.config_filename)
@Habits.subcommand('add') # pylint: disable=missing-docstring
class HabitsAdd(ApplicationWithApi):
DESCRIPTION = _("Add a habit <habit>") # noqa: Q000
priority = cli.SwitchAttr(
['-p', '--priority'],
cli.Set('0.1', '1', '1.5', '2'), default='1',
help=_("Priority (complexity) of a habit")) # noqa: Q000
direction = cli.SwitchAttr(
['-d', '--direction'],
cli.Set('positive', 'negative', 'both'), default='both',
help=_("positive/negative/both")) # noqa: Q000
def main(self, *habit: str):
habit_str = ' '.join(habit)
if not habit_str:
self.log.error(_("Empty habit text!")) # noqa: Q000
return 1
super().main()
self.api.tasks.user.post(
type='habit', text=habit_str,
priority=self.priority, up=(self.direction != 'negative'),
down=(self.direction != 'positive'))
res = _("Added habit '{}' with priority {} and direction {}").format( # noqa: Q000
habit_str, self.priority, self.direction)
print(prettify(res))
Habits.invoke(config_filename=self.config_filename)
return None
@Habits.subcommand('delete') # pylint: disable=missing-docstring
class HabitsDelete(HabitsChange):
DESCRIPTION = _("Delete a habit with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].delete()
def log_op(self, tid):
return _("Deleted habit {text}").format(**self.changing_tasks[tid]) # noqa: Q000
@Habits.subcommand('up') # pylint: disable=missing-docstring
class HabitsUp(HabitsChange):
DESCRIPTION = _("Up (+) a habit with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].score['up'].post()
def validate(self, task):
return task['up']
def log_op(self, tid):
return _("Incremented habit {text}").format(**self.changing_tasks[tid]) # noqa: Q000
@Habits.subcommand('down') # pylint: disable=missing-docstring
class HabitsDown(HabitsChange):
DESCRIPTION = _("Down (-) a habit with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].score['down'].post()
def validate(self, task):
return task['down']
def log_op(self, tid):
"""show a message to user on successful change of `tid`"""
return _("Decremented habit {text}").format(**self.changing_tasks[tid]) # noqa: Q000
class DailysChange(TasksChange): # pylint: disable=missing-docstring,abstract-method
domain = 'dailys'
def domain_print(self):
Dailys.invoke(config_filename=self.config_filename)
@Dailys.subcommand('done') # pylint: disable=missing-docstring
class DailysUp(DailysChange):
DESCRIPTION = _("Check a dayly with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].score['up'].post()
def log_op(self, tid):
return _("Completed daily {text}").format(**self.changing_tasks[tid]) # noqa: Q000
@Dailys.subcommand('undo') # pylint: disable=missing-docstring
class DailyDown(DailysChange):
DESCRIPTION = _("Uncheck a daily with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].score['down'].post()
def log_op(self, tid):
return _("Unchecked daily {text}").format(**self.changing_tasks[tid]) # noqa: Q000
class TodosChange(TasksChange): # pylint: disable=missing-docstring,abstract-method
domain = 'todos'
def domain_print(self):
ToDos.invoke(config_filename=self.config_filename)
@ToDos.subcommand('done') # pylint: disable=missing-docstring
class TodosUp(TodosChange):
DESCRIPTION = _("Check a todo with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].score['up'].post()
def log_op(self, tid):
return _("Completed todo {text}").format(**self.changing_tasks[tid]) # noqa: Q000
@ToDos.subcommand('delete') # pylint: disable=missing-docstring
class TodosDelete(TodosChange):
DESCRIPTION = _("Delete a todo with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].delete()
def log_op(self, tid):
return _("Deleted todo {text}").format(**self.changing_tasks[tid]) # noqa: Q000
@ToDos.subcommand('add') # pylint: disable=missing-docstring
class TodosAdd(ApplicationWithApi):
DESCRIPTION = _("Add a todo <todo>") # noqa: Q000
priority = cli.SwitchAttr(
['-p', '--priority'],
cli.Set('0.1', '1', '1.5', '2'), default='1',
help=_("Priority (complexity) of a todo")) # noqa: Q000
def main(self, *todo: str):
todo_str = ' '.join(todo)
if not todo_str:
self.log.error(_("Empty todo text!")) # noqa: Q000
return 1
super().main()
self.api.tasks.user.post(type='todo', text=todo_str, priority=self.priority)
res = _("Added todo '{}' with priority {}").format(todo_str, self.priority) # noqa: Q000
print(prettify(res))
ToDos.invoke(config_filename=self.config_filename)
return 0
RewardId = TaskId
@Rewards.subcommand('buy') # pylint: disable=missing-docstring
class RewardsBuy(TasksChange):
DESCRIPTION = _("Buy a reward with reward_id") # noqa: Q000
domain = 'rewards'
ids_can_overlap = True
NO_TASK_ID = _("No reward_ids found!") # noqa: Q000
TASK_ID_INVALID = _("Reward id {} is invalid") # noqa: Q000
PARSED_TASK_IDS = _("Parsed reward ids {}") # noqa: Q000
def main(self, *reward_id: RewardId):
ApplicationWithApi.main(self)
self.more_tasks = get_additional_rewards(self.api)
super().main(*reward_id)
def op(self, tid):
t = self.changing_tasks[tid]
if t['type'] != 'rewards':
self.api.user.buy[t['key']].post()
else:
self.tasks[tid].score['up'].post()
def log_op(self, tid):
return _("Bought reward {text}").format(**self.changing_tasks[tid]) # noqa: Q000
def domain_print(self):
Rewards.invoke(config_filename=self.config_filename)
@Rewards.subcommand('add') # pylint: disable=missing-docstring
class RewardsAdd(ApplicationWithApi):
DESCRIPTION = _("Add a reward <reward>") # noqa: Q000
cost = cli.SwitchAttr(
['--cost'], default='10',
help=_("Cost of a reward (gp)")) # noqa: Q000
def main(self, *reward: str):
todo_str = ' '.join(reward)
if not todo_str:
self.log.error(_("Empty reward text!")) # noqa: Q000
return 1
super().main()
self.api.tasks.user.post(type='reward', text=todo_str, value=self.cost)
res = _("Added reward '{}' with cost {}").format(todo_str, self.cost) # noqa: Q000
print(prettify(res))
Rewards.invoke(config_filename=self.config_filename)
return 0
@HabiticaCli.subcommand('home') # pylint: disable=missing-docstring
class Home(ConfiguredApplication):
DESCRIPTION = _("Open habitica site in browser") # noqa: Q000
def main(self):
super().main()
from webbrowser import open_new_tab
HABITICA_TASKS_PAGE = '/#/tasks'
home_url = '{}{}'.format(self.config['url'], HABITICA_TASKS_PAGE)
print(_("Opening {}").format(home_url)) # noqa: Q000
open_new_tab(home_url)
@HabiticaCli.subcommand('server') # pylint: disable=missing-docstring
class Server(ApplicationWithApi):
DESCRIPTION = _("Check habitica server availability") # noqa: Q000
def main(self):
super().main()
try:
ret = self.api.status.get()
if isinstance(ret, dict) and ret['status'] == 'up':
print(_("Habitica server {} online").format(self.config['url'])) # noqa: Q000
return 0
except (KeyError, requests.exceptions.ConnectionError):
pass
msg = _("Habitica server {} offline or there is some issue with it") # noqa: Q000
print(msg.format(self.config['url']))
return -1
@HabiticaCli.subcommand('spells') # pylint: disable=missing-docstring
class Spells(ApplicationWithApi):
DESCRIPTION = _("Prints all available spells") # noqa: Q000
def main(self):
if self.nested_command:
return
super().main()
user = self.api.user.get()
content = get_content(self.api)
user_level = user['stats']['lvl']
if user_level < 10:
print(_("Your level is too low. Come back on level 10 or higher")) # noqa: Q000
user_class = user['stats']['class']
user_spells = [
v for k, v in content['spells'][user_class].items()
if user_level > v['lvl']
]
print(_("You are a {} of level {}").format(_(user_class), user_level)) # noqa: Q000
for spell in sorted(user_spells, key=lambda x: x['lvl']):
msg = _("[{key}] {text} ({mana}:droplet:) - {notes}").format(**spell) # noqa: Q000
print(msg)
subcommands_file = local.path(SUBCOMMANDS_JSON)
if subcommands_file.exists():
try:
with open(subcommands_file) as subcommands_file_obj:
subcommands = json.load(subcommands_file_obj)
del subcommands_file_obj
for name, module in subcommands.items():
HabiticaCli.subcommand(name, module)
del name
del module
del subcommands
except (AttributeError, JSONDecodeError) as error:
warnings.warn('subcommands.json found, but it is invalid: {}'.format(error))
del error
del subcommands_file
if __name__ == '__main__':
HabiticaCli.run()
|
ASMfreaK/habitipy | habitipy/cli.py | load_conf | python | def load_conf(configfile, config=None):
default_login = 'your-login-for-api-here'
default_password = 'your-password-for-api-here'
config = config or {}
configfile = local.path(configfile)
if not configfile.exists():
configfile.dirname.mkdir()
else:
assert_secure_file(configfile)
with secure_filestore(), cli.Config(configfile) as conf:
config['url'] = conf.get('habitipy.url', 'https://habitica.com')
config['login'] = conf.get('habitipy.login', default_login)
config['password'] = conf.get('habitipy.password', default_password)
if config['login'] == default_login or config['password'] == default_password:
if cli.terminal.ask(
_("""Your creditentials are invalid. Do you want to enter them now?"""),
default=True):
msg = _("""
You can get your login information at
https://habitica.com/#/options/settings/api
Both your user id and API token should look like this:
xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
where 'x' is a number between 0-9 or a character a-f.
""")
print(dedent(msg))
msg = _("""Please enter your login (user ID)""")
config['login'] = cli.terminal.prompt(msg, validator=is_uuid)
msg = _("""Please enter your password (API token)""")
config['password'] = cli.terminal.prompt(msg, validator=is_uuid)
conf.set('habitipy.login', config['login'])
conf.set('habitipy.password', config['password'])
print(dedent(_("""
Your creditentials are securely stored in
{configfile}
You can edit that file later if you need.
""")).format(configfile=configfile))
config['show_numbers'] = conf.get('habitipy.show_numbers', 'y')
config['show_numbers'] = config['show_numbers'] in YES_ANSWERS
config['show_style'] = conf.get('habitipy.show_style', 'wide')
if config['show_style'] not in CHECK_MARK_STYLES:
config['show_style'] = 'wide'
return config | Get authentication data from the AUTH_CONF file. | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/cli.py#L55-L97 | [
"def assert_secure_file(file):\n \"\"\"checks if a file is stored securely\"\"\"\n if not is_secure_file(file):\n msg = \"\"\"\n File {0} can be read by other users.\n This is not secure. Please run 'chmod 600 \"{0}\"'\"\"\"\n raise SecurityError(dedent(msg).replace('\\n', ' ').for... | """
habitipy - tools and library for Habitica restful API
command-line interface library using plumbum
"""
# pylint: disable=arguments-differ, attribute-defined-outside-init,ungrouped-imports
# pylint: disable=invalid-name, logging-format-interpolation,too-few-public-methods
import warnings
import logging
import os
import json
import uuid
from bisect import bisect
from textwrap import dedent
from typing import List, Union, Dict, Any # pylint: disable=unused-import
import pkg_resources
from plumbum import local, cli, colors
import requests
from .api import Habitipy
from .util import assert_secure_file, secure_filestore
from .util import get_translation_functions, get_translation_for
from .util import prettify
try:
from json import JSONDecodeError # type: ignore
except ImportError:
JSONDecodeError = ValueError # type: ignore
DEFAULT_CONF = '~/.config/habitipy/config'
SUBCOMMANDS_JSON = '~/.config/habitipy/subcommands.json'
CONTENT_JSON = local.path('~/.config/habitipy/content.json')
_, ngettext = get_translation_functions('habitipy', names=('gettext', 'ngettext'))
CLASSES = [_("warrior"), _("rogue"), _("wizard"), _("healer")] # noqa: Q000
YES_ANSWERS = ('yes', 'y', 'true', 'True', '1')
CHECK_MARK_STYLES = ('wide', 'narrow', 'ascii')
CHECK = {
'wide': colors.green | '✔ ',
'narrow': colors.green | '✔',
'ascii': '[X]'
}
UNCHECK = {
'wide': colors.red | '✖ ',
'narrow': colors.red | '✖',
'ascii': '[ ]'
}
def is_uuid(u):
"""validator for plumbum prompt"""
if isinstance(u, str) and u.replace('-', '') == uuid.UUID(u).hex:
return u
return False
class ConfiguredApplication(cli.Application):
"""Application with config"""
config_filename = cli.SwitchAttr(
['-c', '--config'], argtype=local.path, default=DEFAULT_CONF,
argname='CONFIG',
help=_("Use file CONFIG for config")) # noqa: Q000
verbose = cli.Flag(
['-v', '--verbose'],
help=_("Verbose output - log everything."), # noqa: Q000
excludes=['-s', '--silent'])
silence_level = cli.CountOf(
['-s', '--silent'],
help=_("Make program more silent"), # noqa: Q000
excludes=['-v', '--verbose'])
def main(self):
self.config = load_conf(self.config_filename)
self.log = logging.getLogger(str(self.__class__).split("'")[1])
self.log.addHandler(logging.StreamHandler())
if self.verbose:
self.log.setLevel(logging.DEBUG)
else:
base_level = logging.INFO
self.log.setLevel(base_level + 10 * self.silence_level)
def get_content(api, rebuild_cache=False):
"""get content from server or cache"""
if hasattr(get_content, 'cache') and not rebuild_cache:
return get_content.cache
if not os.path.exists(CONTENT_JSON) or rebuild_cache:
import locale
content_endpoint = api.content.get
# pylint: disable=protected-access
try_langs = []
try:
lang = get_translation_for('habitipy').info()['language']
try_langs.append(lang)
except KeyError:
pass
try:
loc = locale.getdefaultlocale()[0]
if loc:
try_langs.append(loc)
try_langs.append(loc[:2])
except IndexError:
pass
server_lang = content_endpoint._node.params['query']['language']
# handle something like 'ru_RU' not available - only 'ru'
for lang in try_langs:
if lang in server_lang.possible_values:
loc = {'language': lang}
break
else:
loc = {}
get_content.cache = content = content_endpoint(**loc)
with open(CONTENT_JSON, 'w') as f:
json.dump(content, f)
return content
try:
with open(CONTENT_JSON) as f:
get_content.cache = content = json.load(f)
return content
except JSONDecodeError:
return get_content(api, rebuild_cache=True)
class ApplicationWithApi(ConfiguredApplication):
"""Application with configured Habitica API"""
api = None # type: Habitipy
def main(self):
super().main()
self.api = Habitipy(self.config)
class HabiticaCli(ConfiguredApplication): # pylint: disable=missing-docstring
DESCRIPTION = _("tools and library for Habitica restful API") # noqa: Q000
VERSION = pkg_resources.get_distribution('habitipy').version
def main(self):
if self.nested_command:
return
super().main()
self.log.error(_("No subcommand given, exiting")) # noqa: Q000
@HabiticaCli.subcommand('status') # pylint: disable=missing-docstring
class Status(ApplicationWithApi):
DESCRIPTION = _("Show HP, XP, GP, and more") # noqa: Q000
def main(self):
super().main()
user = self.api.user.get()
for key in ['hp', 'mp', 'exp']:
user['stats'][key] = round(user['stats'][key])
user['stats']['class'] = _(user['stats']['class']).capitalize()
user['food'] = sum(user['items']['food'].values())
content = get_content(self.api)
user['pet'] = user['items']['currentPet'] if 'currentPet' in user['items'] else None
user['pet'] = content['petInfo'][user['pet']]['text'] if user['pet'] else ''
user['pet'] = _("Pet: ") + user['pet'] if user['pet'] else _("No pet") # noqa: Q000
user['mount'] = user['items'].get('currentMount', None)
user['mount'] = content['mountInfo'][user['mount']]['text'] if user['mount'] else ''
if user['mount']:
user['mount'] = _("Mount: ") + user['mount'] # noqa: Q000
else:
user['mount'] = _("No mount") # noqa: Q000
level = _("\nLevel {stats[lvl]} {stats[class]}\n").format(**user) # noqa: Q000
highlight = '-' * (len(level) - 2)
level = highlight + level + highlight
result = [
level,
colors.red | _("Health: {stats[hp]}/{stats[maxHealth]}"), # noqa: Q000
colors.yellow | _("XP: {stats[exp]}/{stats[toNextLevel]}"), # noqa: Q000
colors.blue | _("Mana: {stats[mp]}/{stats[maxMP]}"), # noqa: Q000
colors.light_yellow | _("GP: {stats[gp]:.2f}"), # noqa: Q000
'{pet} ' + ngettext(
"({food} food item)", # noqa: Q000
"({food} food items)", # noqa: Q000
user['food']),
'{mount}']
quest = self.quest_info(user)
if quest:
result.append(quest)
print('\n'.join(result).format(**user))
def quest_info(self, user):
"""Get current quest info or return None"""
key = user['party']['quest'].get('key', None)
if '_id' not in user['party'] or key is None:
return None
for refresh in False, True:
content = get_content(self.api, refresh)
quest = content['quests'].get(key, None)
if quest:
break
else:
self.log.warning(dedent(_(
"""Quest {} not found in Habitica's content.
Please file an issue to https://github.com/ASMfreaK/habitipy/issues
""")).format(key))
return None
for quest_type, quest_template in (
('collect', _("""
Quest: {quest[text]} (collect-type)
{user[party][quest][progress][collectedItems]} quest items collected
""")),
('boss', _("""
Quest: {quest[text]} (boss)
{user[party][quest][progress][up]:.1f} damage will be dealt to {quest[boss][name]}
"""))):
if quest_type in quest:
try:
return dedent(quest_template.format(quest=quest, user=user))[1:-1]
except KeyError:
self.log.warning(dedent(_(
"""Something went wrong when formatting quest {}.
Please file an issue to https://github.com/ASMfreaK/habitipy/issues
""")).format(key))
return None
self.log.warning(dedent(_(
"""Quest {} isn't neither a collect-type or a boss-type.
Please file an issue to https://github.com/ASMfreaK/habitipy/issues
""")).format(key))
class ScoreInfo:
"""task value/score info: http://habitica.wikia.com/wiki/Task_Value"""
scores = {
'wide': ['▁', '▂', '▃', '▄', '▅', '▆', '▇'],
'narrow': ['▁', '▂', '▃', '▄', '▅', '▆', '▇'],
'ascii': ['*', '**', '***', '****', '*****', '******', '*******']
}
colors_ = ['Red3', 'Red1', 'DarkOrange', 'Gold3A', 'Green', 'LightCyan3', 'Cyan1']
breakpoints = [-20, -10, -1, 1, 5, 10]
def __new__(cls, style, value):
index = bisect(cls.breakpoints, value)
score = cls.scores[style][index]
score_col = colors.fg(cls.colors_[index])
if style == 'ascii':
max_scores_len = max(map(len, cls.scores[style]))
score = '[' + score.center(max_scores_len) + ']'
# score = '⎡' + score.center(cls.max_scores_len) + '⎤'
return score_col | score
@classmethod
def color(cls, value):
"""task value/score color"""
index = bisect(cls.breakpoints, value)
return colors.fg(cls.colors_[index])
class TasksPrint(ApplicationWithApi):
"""Put all tasks from `domain` to print"""
domain = '' # type: str
more_tasks = [] # type: List[Dict[str, Any]]
def domain_format(self, task):
"""format task for domain"""
raise NotImplementedError()
def main(self):
if self.nested_command:
return
super().main()
tasks = self.api.tasks.user.get(type=self.domain)
tasks.extend(self.more_tasks)
habits_len = len(tasks)
ident_size = len(str(habits_len)) + 2
number_format = '{{:{}d}}. '.format(ident_size - 2)
for i, task in enumerate(tasks):
i = number_format.format(i + 1) if self.config['show_numbers'] else ''
res = i + prettify(self.domain_format(task))
print(res)
@HabiticaCli.subcommand('habits') # pylint: disable=missing-docstring
class Habits(TasksPrint):
DESCRIPTION = _("List, up and down habit tasks") # noqa: Q000
domain = 'habits'
def domain_format(self, habit):
score = ScoreInfo(self.config['show_style'], habit['value'])
return _("{0} {text}").format(score, **habit) # noqa: Q000
@HabiticaCli.subcommand('dailies') # pylint: disable=missing-docstring
class Dailys(TasksPrint):
DESCRIPTION = _("List, check, uncheck daily tasks") # noqa: Q000
domain = 'dailys'
def domain_format(self, daily):
score = ScoreInfo(self.config['show_style'], daily['value'])
check = CHECK if daily['completed'] else UNCHECK
check = check[self.config['show_style']]
checklist_done = len(list(filter(lambda x: x['completed'], daily['checklist'])))
checklist = \
' {}/{}'.format(
checklist_done,
len(daily['checklist'])
) if daily['checklist'] else ''
res = _("{0}{1}{text}{2}").format(check, score, checklist, **daily) # noqa: Q000
if not daily['isDue']:
res = colors.strikeout + colors.dark_gray | res
return res
@HabiticaCli.subcommand('todos') # pylint: disable=missing-docstring
class ToDos(TasksPrint):
DESCRIPTION = _("List, comlete, add or delete todo tasks") # noqa: Q000
domain = 'todos'
def domain_format(self, todo):
score = ScoreInfo(self.config['show_style'], todo['value'])
check = CHECK if todo['completed'] else UNCHECK
check = check[self.config['show_style']]
checklist_done = len(list(filter(lambda x: x['completed'], todo['checklist'])))
checklist = \
' {}/{}'.format(
checklist_done,
len(todo['checklist'])
) if todo['checklist'] else ''
res = _("{1}{0}{text}{2}").format(check, score, checklist, **todo) # noqa: Q000
return res
def get_additional_rewards(api):
"""returns list of non-user rewards (potion, armoire, gear)"""
c = get_content(api)
tasks = [c[i] for i in ['potion', 'armoire']]
tasks.extend(api.user.inventory.buy.get())
for task in tasks:
task['id'] = task['alias'] = task['key']
return tasks
@HabiticaCli.subcommand('rewards') # pylint: disable=missing-docstring
class Rewards(TasksPrint):
DESCRIPTION = _("List, buy and add rewards") # noqa: Q000
domain = 'rewards'
def main(self):
if self.nested_command:
return
ApplicationWithApi.main(self)
self.more_tasks = get_additional_rewards(self.api)
super().main()
def domain_format(self, reward):
score = colors.yellow | _("{value} gp").format(**reward) # noqa: Q000
return _("{} {text}").format(score, **reward) # noqa: Q000
class TaskId(List[Union[str, int]]):
"""
handle task-id formats such as:
habitica todos done 3 taskalias_or_uuid
habitica todos done 1,2,3,taskalias_or_uuid
habitica todos done 2 3
habitica todos done 1-3,4 8
"""
def __new__(cls, tids: str):
task_ids = [] # type: List[Union[str, int]]
for bit in tids.split(','):
try:
if '-' in bit:
start, stop = [int(e) for e in bit.split('-')]
task_ids.extend(range(start, stop + 1))
else:
task_ids.append(int(bit))
except ValueError:
task_ids.append(bit)
return [e - 1 if isinstance(e, int) else e for e in task_ids] # type: ignore
class TasksChange(ApplicationWithApi):
"""find all tasks specified by user and do self.op on them"""
domain = '' # type: str
noop = cli.Flag(
['--dry-run', '--noop'],
help=_("If passed, won't actually change anything on habitipy server"), # noqa: Q000
default=False)
more_tasks = [] # type: List[Dict[str, Any]]
ids_can_overlap = False
NO_TASK_ID = _("No task_ids found!") # noqa: Q000
TASK_ID_INVALID = _("Task id {} is invalid") # noqa: Q000
PARSED_TASK_IDS = _("Parsed task ids {}") # noqa: Q000
def main(self, *task_ids: TaskId): # type: ignore
super().main()
task_id = [] # type: List[Union[str,int]]
for tids in task_ids:
task_id.extend(tids)
if not task_id:
self.log.error(self.NO_TASK_ID)
return 1
tasks = self.api.tasks.user.get(type=self.domain)
assert isinstance(tasks, list)
tasks.extend(self.more_tasks)
task_uuids = [task['id'] for task in tasks]
num_tasks = len(tasks)
aliases = {task['alias']: task for task in tasks if 'alias' in task}
self.changing_tasks = {} # type: Dict[Union[str], Dict[str, Any]]
changing_tasks_ids = [] # type: List[str]
for tid in task_id:
if isinstance(tid, int):
if 0 <= tid <= num_tasks:
changing_tasks_ids.append(task_uuids[tid])
self.changing_tasks[task_uuids[tid]] = tasks[tid]
continue
elif isinstance(tid, str):
if tid in task_uuids:
changing_tasks_ids.append(tid)
self.changing_tasks[tid] = tasks[task_uuids.index(tid)]
continue
elif tid in aliases:
t_id = aliases[tid]['id']
changing_tasks_ids.append(t_id)
self.changing_tasks[t_id] = aliases[tid]
continue
self.log.error(self.TASK_ID_INVALID.format(tid))
return 1
idstr = ' '.join(self.changing_tasks.keys())
self.log.info(self.PARSED_TASK_IDS.format(idstr)) # noqa: Q000
self.tasks = self.api.tasks
if not self.ids_can_overlap:
changing_tasks_ids = list(set(changing_tasks_ids))
for tid in changing_tasks_ids:
if not self.noop:
self.op(tid)
res = self.log_op(tid)
print(prettify(res))
self.domain_print()
def validate(self, task): # pylint: disable=no-self-use,unused-argument
"""check if task is valid for the operation"""
return True
def op(self, tid):
"""operation to be done on task with `tid`"""
raise NotImplementedError
def log_op(self, tid):
"""return a message to show user on successful change of `tid`"""
raise NotImplementedError
def domain_print(self):
"""show domain to user again"""
raise NotImplementedError
class HabitsChange(TasksChange): # pylint: disable=missing-docstring,abstract-method
domain = 'habits'
ids_can_overlap = True
def domain_print(self):
Habits.invoke(config_filename=self.config_filename)
@Habits.subcommand('add') # pylint: disable=missing-docstring
class HabitsAdd(ApplicationWithApi):
DESCRIPTION = _("Add a habit <habit>") # noqa: Q000
priority = cli.SwitchAttr(
['-p', '--priority'],
cli.Set('0.1', '1', '1.5', '2'), default='1',
help=_("Priority (complexity) of a habit")) # noqa: Q000
direction = cli.SwitchAttr(
['-d', '--direction'],
cli.Set('positive', 'negative', 'both'), default='both',
help=_("positive/negative/both")) # noqa: Q000
def main(self, *habit: str):
habit_str = ' '.join(habit)
if not habit_str:
self.log.error(_("Empty habit text!")) # noqa: Q000
return 1
super().main()
self.api.tasks.user.post(
type='habit', text=habit_str,
priority=self.priority, up=(self.direction != 'negative'),
down=(self.direction != 'positive'))
res = _("Added habit '{}' with priority {} and direction {}").format( # noqa: Q000
habit_str, self.priority, self.direction)
print(prettify(res))
Habits.invoke(config_filename=self.config_filename)
return None
@Habits.subcommand('delete') # pylint: disable=missing-docstring
class HabitsDelete(HabitsChange):
DESCRIPTION = _("Delete a habit with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].delete()
def log_op(self, tid):
return _("Deleted habit {text}").format(**self.changing_tasks[tid]) # noqa: Q000
@Habits.subcommand('up') # pylint: disable=missing-docstring
class HabitsUp(HabitsChange):
DESCRIPTION = _("Up (+) a habit with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].score['up'].post()
def validate(self, task):
return task['up']
def log_op(self, tid):
return _("Incremented habit {text}").format(**self.changing_tasks[tid]) # noqa: Q000
@Habits.subcommand('down') # pylint: disable=missing-docstring
class HabitsDown(HabitsChange):
DESCRIPTION = _("Down (-) a habit with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].score['down'].post()
def validate(self, task):
return task['down']
def log_op(self, tid):
"""show a message to user on successful change of `tid`"""
return _("Decremented habit {text}").format(**self.changing_tasks[tid]) # noqa: Q000
class DailysChange(TasksChange): # pylint: disable=missing-docstring,abstract-method
domain = 'dailys'
def domain_print(self):
Dailys.invoke(config_filename=self.config_filename)
@Dailys.subcommand('done') # pylint: disable=missing-docstring
class DailysUp(DailysChange):
DESCRIPTION = _("Check a dayly with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].score['up'].post()
def log_op(self, tid):
return _("Completed daily {text}").format(**self.changing_tasks[tid]) # noqa: Q000
@Dailys.subcommand('undo') # pylint: disable=missing-docstring
class DailyDown(DailysChange):
DESCRIPTION = _("Uncheck a daily with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].score['down'].post()
def log_op(self, tid):
return _("Unchecked daily {text}").format(**self.changing_tasks[tid]) # noqa: Q000
class TodosChange(TasksChange): # pylint: disable=missing-docstring,abstract-method
domain = 'todos'
def domain_print(self):
ToDos.invoke(config_filename=self.config_filename)
@ToDos.subcommand('done') # pylint: disable=missing-docstring
class TodosUp(TodosChange):
DESCRIPTION = _("Check a todo with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].score['up'].post()
def log_op(self, tid):
return _("Completed todo {text}").format(**self.changing_tasks[tid]) # noqa: Q000
@ToDos.subcommand('delete') # pylint: disable=missing-docstring
class TodosDelete(TodosChange):
DESCRIPTION = _("Delete a todo with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].delete()
def log_op(self, tid):
return _("Deleted todo {text}").format(**self.changing_tasks[tid]) # noqa: Q000
@ToDos.subcommand('add') # pylint: disable=missing-docstring
class TodosAdd(ApplicationWithApi):
DESCRIPTION = _("Add a todo <todo>") # noqa: Q000
priority = cli.SwitchAttr(
['-p', '--priority'],
cli.Set('0.1', '1', '1.5', '2'), default='1',
help=_("Priority (complexity) of a todo")) # noqa: Q000
def main(self, *todo: str):
todo_str = ' '.join(todo)
if not todo_str:
self.log.error(_("Empty todo text!")) # noqa: Q000
return 1
super().main()
self.api.tasks.user.post(type='todo', text=todo_str, priority=self.priority)
res = _("Added todo '{}' with priority {}").format(todo_str, self.priority) # noqa: Q000
print(prettify(res))
ToDos.invoke(config_filename=self.config_filename)
return 0
RewardId = TaskId
@Rewards.subcommand('buy') # pylint: disable=missing-docstring
class RewardsBuy(TasksChange):
DESCRIPTION = _("Buy a reward with reward_id") # noqa: Q000
domain = 'rewards'
ids_can_overlap = True
NO_TASK_ID = _("No reward_ids found!") # noqa: Q000
TASK_ID_INVALID = _("Reward id {} is invalid") # noqa: Q000
PARSED_TASK_IDS = _("Parsed reward ids {}") # noqa: Q000
def main(self, *reward_id: RewardId):
ApplicationWithApi.main(self)
self.more_tasks = get_additional_rewards(self.api)
super().main(*reward_id)
def op(self, tid):
t = self.changing_tasks[tid]
if t['type'] != 'rewards':
self.api.user.buy[t['key']].post()
else:
self.tasks[tid].score['up'].post()
def log_op(self, tid):
return _("Bought reward {text}").format(**self.changing_tasks[tid]) # noqa: Q000
def domain_print(self):
Rewards.invoke(config_filename=self.config_filename)
@Rewards.subcommand('add') # pylint: disable=missing-docstring
class RewardsAdd(ApplicationWithApi):
DESCRIPTION = _("Add a reward <reward>") # noqa: Q000
cost = cli.SwitchAttr(
['--cost'], default='10',
help=_("Cost of a reward (gp)")) # noqa: Q000
def main(self, *reward: str):
todo_str = ' '.join(reward)
if not todo_str:
self.log.error(_("Empty reward text!")) # noqa: Q000
return 1
super().main()
self.api.tasks.user.post(type='reward', text=todo_str, value=self.cost)
res = _("Added reward '{}' with cost {}").format(todo_str, self.cost) # noqa: Q000
print(prettify(res))
Rewards.invoke(config_filename=self.config_filename)
return 0
@HabiticaCli.subcommand('home') # pylint: disable=missing-docstring
class Home(ConfiguredApplication):
DESCRIPTION = _("Open habitica site in browser") # noqa: Q000
def main(self):
super().main()
from webbrowser import open_new_tab
HABITICA_TASKS_PAGE = '/#/tasks'
home_url = '{}{}'.format(self.config['url'], HABITICA_TASKS_PAGE)
print(_("Opening {}").format(home_url)) # noqa: Q000
open_new_tab(home_url)
@HabiticaCli.subcommand('server') # pylint: disable=missing-docstring
class Server(ApplicationWithApi):
DESCRIPTION = _("Check habitica server availability") # noqa: Q000
def main(self):
super().main()
try:
ret = self.api.status.get()
if isinstance(ret, dict) and ret['status'] == 'up':
print(_("Habitica server {} online").format(self.config['url'])) # noqa: Q000
return 0
except (KeyError, requests.exceptions.ConnectionError):
pass
msg = _("Habitica server {} offline or there is some issue with it") # noqa: Q000
print(msg.format(self.config['url']))
return -1
@HabiticaCli.subcommand('spells') # pylint: disable=missing-docstring
class Spells(ApplicationWithApi):
DESCRIPTION = _("Prints all available spells") # noqa: Q000
def main(self):
if self.nested_command:
return
super().main()
user = self.api.user.get()
content = get_content(self.api)
user_level = user['stats']['lvl']
if user_level < 10:
print(_("Your level is too low. Come back on level 10 or higher")) # noqa: Q000
user_class = user['stats']['class']
user_spells = [
v for k, v in content['spells'][user_class].items()
if user_level > v['lvl']
]
print(_("You are a {} of level {}").format(_(user_class), user_level)) # noqa: Q000
for spell in sorted(user_spells, key=lambda x: x['lvl']):
msg = _("[{key}] {text} ({mana}:droplet:) - {notes}").format(**spell) # noqa: Q000
print(msg)
subcommands_file = local.path(SUBCOMMANDS_JSON)
if subcommands_file.exists():
try:
with open(subcommands_file) as subcommands_file_obj:
subcommands = json.load(subcommands_file_obj)
del subcommands_file_obj
for name, module in subcommands.items():
HabiticaCli.subcommand(name, module)
del name
del module
del subcommands
except (AttributeError, JSONDecodeError) as error:
warnings.warn('subcommands.json found, but it is invalid: {}'.format(error))
del error
del subcommands_file
if __name__ == '__main__':
HabiticaCli.run()
|
ASMfreaK/habitipy | habitipy/cli.py | get_content | python | def get_content(api, rebuild_cache=False):
if hasattr(get_content, 'cache') and not rebuild_cache:
return get_content.cache
if not os.path.exists(CONTENT_JSON) or rebuild_cache:
import locale
content_endpoint = api.content.get
# pylint: disable=protected-access
try_langs = []
try:
lang = get_translation_for('habitipy').info()['language']
try_langs.append(lang)
except KeyError:
pass
try:
loc = locale.getdefaultlocale()[0]
if loc:
try_langs.append(loc)
try_langs.append(loc[:2])
except IndexError:
pass
server_lang = content_endpoint._node.params['query']['language']
# handle something like 'ru_RU' not available - only 'ru'
for lang in try_langs:
if lang in server_lang.possible_values:
loc = {'language': lang}
break
else:
loc = {}
get_content.cache = content = content_endpoint(**loc)
with open(CONTENT_JSON, 'w') as f:
json.dump(content, f)
return content
try:
with open(CONTENT_JSON) as f:
get_content.cache = content = json.load(f)
return content
except JSONDecodeError:
return get_content(api, rebuild_cache=True) | get content from server or cache | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/cli.py#L126-L164 | [
"def get_translation_for(package_name: str) -> gettext.NullTranslations:\n \"\"\"find and return gettext translation for package\"\"\"\n localedir = None\n for localedir in pkg_resources.resource_filename(package_name, 'i18n'), None:\n localefile = gettext.find(package_name, localedir) # type: igno... | """
habitipy - tools and library for Habitica restful API
command-line interface library using plumbum
"""
# pylint: disable=arguments-differ, attribute-defined-outside-init,ungrouped-imports
# pylint: disable=invalid-name, logging-format-interpolation,too-few-public-methods
import warnings
import logging
import os
import json
import uuid
from bisect import bisect
from textwrap import dedent
from typing import List, Union, Dict, Any # pylint: disable=unused-import
import pkg_resources
from plumbum import local, cli, colors
import requests
from .api import Habitipy
from .util import assert_secure_file, secure_filestore
from .util import get_translation_functions, get_translation_for
from .util import prettify
try:
from json import JSONDecodeError # type: ignore
except ImportError:
JSONDecodeError = ValueError # type: ignore
DEFAULT_CONF = '~/.config/habitipy/config'
SUBCOMMANDS_JSON = '~/.config/habitipy/subcommands.json'
CONTENT_JSON = local.path('~/.config/habitipy/content.json')
_, ngettext = get_translation_functions('habitipy', names=('gettext', 'ngettext'))
CLASSES = [_("warrior"), _("rogue"), _("wizard"), _("healer")] # noqa: Q000
YES_ANSWERS = ('yes', 'y', 'true', 'True', '1')
CHECK_MARK_STYLES = ('wide', 'narrow', 'ascii')
CHECK = {
'wide': colors.green | '✔ ',
'narrow': colors.green | '✔',
'ascii': '[X]'
}
UNCHECK = {
'wide': colors.red | '✖ ',
'narrow': colors.red | '✖',
'ascii': '[ ]'
}
def is_uuid(u):
"""validator for plumbum prompt"""
if isinstance(u, str) and u.replace('-', '') == uuid.UUID(u).hex:
return u
return False
def load_conf(configfile, config=None):
"""Get authentication data from the AUTH_CONF file."""
default_login = 'your-login-for-api-here'
default_password = 'your-password-for-api-here'
config = config or {}
configfile = local.path(configfile)
if not configfile.exists():
configfile.dirname.mkdir()
else:
assert_secure_file(configfile)
with secure_filestore(), cli.Config(configfile) as conf:
config['url'] = conf.get('habitipy.url', 'https://habitica.com')
config['login'] = conf.get('habitipy.login', default_login)
config['password'] = conf.get('habitipy.password', default_password)
if config['login'] == default_login or config['password'] == default_password:
if cli.terminal.ask(
_("""Your creditentials are invalid. Do you want to enter them now?"""),
default=True):
msg = _("""
You can get your login information at
https://habitica.com/#/options/settings/api
Both your user id and API token should look like this:
xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
where 'x' is a number between 0-9 or a character a-f.
""")
print(dedent(msg))
msg = _("""Please enter your login (user ID)""")
config['login'] = cli.terminal.prompt(msg, validator=is_uuid)
msg = _("""Please enter your password (API token)""")
config['password'] = cli.terminal.prompt(msg, validator=is_uuid)
conf.set('habitipy.login', config['login'])
conf.set('habitipy.password', config['password'])
print(dedent(_("""
Your creditentials are securely stored in
{configfile}
You can edit that file later if you need.
""")).format(configfile=configfile))
config['show_numbers'] = conf.get('habitipy.show_numbers', 'y')
config['show_numbers'] = config['show_numbers'] in YES_ANSWERS
config['show_style'] = conf.get('habitipy.show_style', 'wide')
if config['show_style'] not in CHECK_MARK_STYLES:
config['show_style'] = 'wide'
return config
class ConfiguredApplication(cli.Application):
"""Application with config"""
config_filename = cli.SwitchAttr(
['-c', '--config'], argtype=local.path, default=DEFAULT_CONF,
argname='CONFIG',
help=_("Use file CONFIG for config")) # noqa: Q000
verbose = cli.Flag(
['-v', '--verbose'],
help=_("Verbose output - log everything."), # noqa: Q000
excludes=['-s', '--silent'])
silence_level = cli.CountOf(
['-s', '--silent'],
help=_("Make program more silent"), # noqa: Q000
excludes=['-v', '--verbose'])
def main(self):
self.config = load_conf(self.config_filename)
self.log = logging.getLogger(str(self.__class__).split("'")[1])
self.log.addHandler(logging.StreamHandler())
if self.verbose:
self.log.setLevel(logging.DEBUG)
else:
base_level = logging.INFO
self.log.setLevel(base_level + 10 * self.silence_level)
class ApplicationWithApi(ConfiguredApplication):
"""Application with configured Habitica API"""
api = None # type: Habitipy
def main(self):
super().main()
self.api = Habitipy(self.config)
class HabiticaCli(ConfiguredApplication): # pylint: disable=missing-docstring
DESCRIPTION = _("tools and library for Habitica restful API") # noqa: Q000
VERSION = pkg_resources.get_distribution('habitipy').version
def main(self):
if self.nested_command:
return
super().main()
self.log.error(_("No subcommand given, exiting")) # noqa: Q000
@HabiticaCli.subcommand('status') # pylint: disable=missing-docstring
class Status(ApplicationWithApi):
DESCRIPTION = _("Show HP, XP, GP, and more") # noqa: Q000
def main(self):
super().main()
user = self.api.user.get()
for key in ['hp', 'mp', 'exp']:
user['stats'][key] = round(user['stats'][key])
user['stats']['class'] = _(user['stats']['class']).capitalize()
user['food'] = sum(user['items']['food'].values())
content = get_content(self.api)
user['pet'] = user['items']['currentPet'] if 'currentPet' in user['items'] else None
user['pet'] = content['petInfo'][user['pet']]['text'] if user['pet'] else ''
user['pet'] = _("Pet: ") + user['pet'] if user['pet'] else _("No pet") # noqa: Q000
user['mount'] = user['items'].get('currentMount', None)
user['mount'] = content['mountInfo'][user['mount']]['text'] if user['mount'] else ''
if user['mount']:
user['mount'] = _("Mount: ") + user['mount'] # noqa: Q000
else:
user['mount'] = _("No mount") # noqa: Q000
level = _("\nLevel {stats[lvl]} {stats[class]}\n").format(**user) # noqa: Q000
highlight = '-' * (len(level) - 2)
level = highlight + level + highlight
result = [
level,
colors.red | _("Health: {stats[hp]}/{stats[maxHealth]}"), # noqa: Q000
colors.yellow | _("XP: {stats[exp]}/{stats[toNextLevel]}"), # noqa: Q000
colors.blue | _("Mana: {stats[mp]}/{stats[maxMP]}"), # noqa: Q000
colors.light_yellow | _("GP: {stats[gp]:.2f}"), # noqa: Q000
'{pet} ' + ngettext(
"({food} food item)", # noqa: Q000
"({food} food items)", # noqa: Q000
user['food']),
'{mount}']
quest = self.quest_info(user)
if quest:
result.append(quest)
print('\n'.join(result).format(**user))
def quest_info(self, user):
"""Get current quest info or return None"""
key = user['party']['quest'].get('key', None)
if '_id' not in user['party'] or key is None:
return None
for refresh in False, True:
content = get_content(self.api, refresh)
quest = content['quests'].get(key, None)
if quest:
break
else:
self.log.warning(dedent(_(
"""Quest {} not found in Habitica's content.
Please file an issue to https://github.com/ASMfreaK/habitipy/issues
""")).format(key))
return None
for quest_type, quest_template in (
('collect', _("""
Quest: {quest[text]} (collect-type)
{user[party][quest][progress][collectedItems]} quest items collected
""")),
('boss', _("""
Quest: {quest[text]} (boss)
{user[party][quest][progress][up]:.1f} damage will be dealt to {quest[boss][name]}
"""))):
if quest_type in quest:
try:
return dedent(quest_template.format(quest=quest, user=user))[1:-1]
except KeyError:
self.log.warning(dedent(_(
"""Something went wrong when formatting quest {}.
Please file an issue to https://github.com/ASMfreaK/habitipy/issues
""")).format(key))
return None
self.log.warning(dedent(_(
"""Quest {} isn't neither a collect-type or a boss-type.
Please file an issue to https://github.com/ASMfreaK/habitipy/issues
""")).format(key))
class ScoreInfo:
"""task value/score info: http://habitica.wikia.com/wiki/Task_Value"""
scores = {
'wide': ['▁', '▂', '▃', '▄', '▅', '▆', '▇'],
'narrow': ['▁', '▂', '▃', '▄', '▅', '▆', '▇'],
'ascii': ['*', '**', '***', '****', '*****', '******', '*******']
}
colors_ = ['Red3', 'Red1', 'DarkOrange', 'Gold3A', 'Green', 'LightCyan3', 'Cyan1']
breakpoints = [-20, -10, -1, 1, 5, 10]
def __new__(cls, style, value):
index = bisect(cls.breakpoints, value)
score = cls.scores[style][index]
score_col = colors.fg(cls.colors_[index])
if style == 'ascii':
max_scores_len = max(map(len, cls.scores[style]))
score = '[' + score.center(max_scores_len) + ']'
# score = '⎡' + score.center(cls.max_scores_len) + '⎤'
return score_col | score
@classmethod
def color(cls, value):
"""task value/score color"""
index = bisect(cls.breakpoints, value)
return colors.fg(cls.colors_[index])
class TasksPrint(ApplicationWithApi):
"""Put all tasks from `domain` to print"""
domain = '' # type: str
more_tasks = [] # type: List[Dict[str, Any]]
def domain_format(self, task):
"""format task for domain"""
raise NotImplementedError()
def main(self):
if self.nested_command:
return
super().main()
tasks = self.api.tasks.user.get(type=self.domain)
tasks.extend(self.more_tasks)
habits_len = len(tasks)
ident_size = len(str(habits_len)) + 2
number_format = '{{:{}d}}. '.format(ident_size - 2)
for i, task in enumerate(tasks):
i = number_format.format(i + 1) if self.config['show_numbers'] else ''
res = i + prettify(self.domain_format(task))
print(res)
@HabiticaCli.subcommand('habits') # pylint: disable=missing-docstring
class Habits(TasksPrint):
DESCRIPTION = _("List, up and down habit tasks") # noqa: Q000
domain = 'habits'
def domain_format(self, habit):
score = ScoreInfo(self.config['show_style'], habit['value'])
return _("{0} {text}").format(score, **habit) # noqa: Q000
@HabiticaCli.subcommand('dailies') # pylint: disable=missing-docstring
class Dailys(TasksPrint):
DESCRIPTION = _("List, check, uncheck daily tasks") # noqa: Q000
domain = 'dailys'
def domain_format(self, daily):
score = ScoreInfo(self.config['show_style'], daily['value'])
check = CHECK if daily['completed'] else UNCHECK
check = check[self.config['show_style']]
checklist_done = len(list(filter(lambda x: x['completed'], daily['checklist'])))
checklist = \
' {}/{}'.format(
checklist_done,
len(daily['checklist'])
) if daily['checklist'] else ''
res = _("{0}{1}{text}{2}").format(check, score, checklist, **daily) # noqa: Q000
if not daily['isDue']:
res = colors.strikeout + colors.dark_gray | res
return res
@HabiticaCli.subcommand('todos') # pylint: disable=missing-docstring
class ToDos(TasksPrint):
DESCRIPTION = _("List, comlete, add or delete todo tasks") # noqa: Q000
domain = 'todos'
def domain_format(self, todo):
score = ScoreInfo(self.config['show_style'], todo['value'])
check = CHECK if todo['completed'] else UNCHECK
check = check[self.config['show_style']]
checklist_done = len(list(filter(lambda x: x['completed'], todo['checklist'])))
checklist = \
' {}/{}'.format(
checklist_done,
len(todo['checklist'])
) if todo['checklist'] else ''
res = _("{1}{0}{text}{2}").format(check, score, checklist, **todo) # noqa: Q000
return res
def get_additional_rewards(api):
"""returns list of non-user rewards (potion, armoire, gear)"""
c = get_content(api)
tasks = [c[i] for i in ['potion', 'armoire']]
tasks.extend(api.user.inventory.buy.get())
for task in tasks:
task['id'] = task['alias'] = task['key']
return tasks
@HabiticaCli.subcommand('rewards') # pylint: disable=missing-docstring
class Rewards(TasksPrint):
DESCRIPTION = _("List, buy and add rewards") # noqa: Q000
domain = 'rewards'
def main(self):
if self.nested_command:
return
ApplicationWithApi.main(self)
self.more_tasks = get_additional_rewards(self.api)
super().main()
def domain_format(self, reward):
score = colors.yellow | _("{value} gp").format(**reward) # noqa: Q000
return _("{} {text}").format(score, **reward) # noqa: Q000
class TaskId(List[Union[str, int]]):
"""
handle task-id formats such as:
habitica todos done 3 taskalias_or_uuid
habitica todos done 1,2,3,taskalias_or_uuid
habitica todos done 2 3
habitica todos done 1-3,4 8
"""
def __new__(cls, tids: str):
task_ids = [] # type: List[Union[str, int]]
for bit in tids.split(','):
try:
if '-' in bit:
start, stop = [int(e) for e in bit.split('-')]
task_ids.extend(range(start, stop + 1))
else:
task_ids.append(int(bit))
except ValueError:
task_ids.append(bit)
return [e - 1 if isinstance(e, int) else e for e in task_ids] # type: ignore
class TasksChange(ApplicationWithApi):
"""find all tasks specified by user and do self.op on them"""
domain = '' # type: str
noop = cli.Flag(
['--dry-run', '--noop'],
help=_("If passed, won't actually change anything on habitipy server"), # noqa: Q000
default=False)
more_tasks = [] # type: List[Dict[str, Any]]
ids_can_overlap = False
NO_TASK_ID = _("No task_ids found!") # noqa: Q000
TASK_ID_INVALID = _("Task id {} is invalid") # noqa: Q000
PARSED_TASK_IDS = _("Parsed task ids {}") # noqa: Q000
def main(self, *task_ids: TaskId): # type: ignore
super().main()
task_id = [] # type: List[Union[str,int]]
for tids in task_ids:
task_id.extend(tids)
if not task_id:
self.log.error(self.NO_TASK_ID)
return 1
tasks = self.api.tasks.user.get(type=self.domain)
assert isinstance(tasks, list)
tasks.extend(self.more_tasks)
task_uuids = [task['id'] for task in tasks]
num_tasks = len(tasks)
aliases = {task['alias']: task for task in tasks if 'alias' in task}
self.changing_tasks = {} # type: Dict[Union[str], Dict[str, Any]]
changing_tasks_ids = [] # type: List[str]
for tid in task_id:
if isinstance(tid, int):
if 0 <= tid <= num_tasks:
changing_tasks_ids.append(task_uuids[tid])
self.changing_tasks[task_uuids[tid]] = tasks[tid]
continue
elif isinstance(tid, str):
if tid in task_uuids:
changing_tasks_ids.append(tid)
self.changing_tasks[tid] = tasks[task_uuids.index(tid)]
continue
elif tid in aliases:
t_id = aliases[tid]['id']
changing_tasks_ids.append(t_id)
self.changing_tasks[t_id] = aliases[tid]
continue
self.log.error(self.TASK_ID_INVALID.format(tid))
return 1
idstr = ' '.join(self.changing_tasks.keys())
self.log.info(self.PARSED_TASK_IDS.format(idstr)) # noqa: Q000
self.tasks = self.api.tasks
if not self.ids_can_overlap:
changing_tasks_ids = list(set(changing_tasks_ids))
for tid in changing_tasks_ids:
if not self.noop:
self.op(tid)
res = self.log_op(tid)
print(prettify(res))
self.domain_print()
def validate(self, task): # pylint: disable=no-self-use,unused-argument
"""check if task is valid for the operation"""
return True
def op(self, tid):
"""operation to be done on task with `tid`"""
raise NotImplementedError
def log_op(self, tid):
"""return a message to show user on successful change of `tid`"""
raise NotImplementedError
def domain_print(self):
"""show domain to user again"""
raise NotImplementedError
class HabitsChange(TasksChange): # pylint: disable=missing-docstring,abstract-method
domain = 'habits'
ids_can_overlap = True
def domain_print(self):
Habits.invoke(config_filename=self.config_filename)
@Habits.subcommand('add') # pylint: disable=missing-docstring
class HabitsAdd(ApplicationWithApi):
DESCRIPTION = _("Add a habit <habit>") # noqa: Q000
priority = cli.SwitchAttr(
['-p', '--priority'],
cli.Set('0.1', '1', '1.5', '2'), default='1',
help=_("Priority (complexity) of a habit")) # noqa: Q000
direction = cli.SwitchAttr(
['-d', '--direction'],
cli.Set('positive', 'negative', 'both'), default='both',
help=_("positive/negative/both")) # noqa: Q000
def main(self, *habit: str):
habit_str = ' '.join(habit)
if not habit_str:
self.log.error(_("Empty habit text!")) # noqa: Q000
return 1
super().main()
self.api.tasks.user.post(
type='habit', text=habit_str,
priority=self.priority, up=(self.direction != 'negative'),
down=(self.direction != 'positive'))
res = _("Added habit '{}' with priority {} and direction {}").format( # noqa: Q000
habit_str, self.priority, self.direction)
print(prettify(res))
Habits.invoke(config_filename=self.config_filename)
return None
@Habits.subcommand('delete') # pylint: disable=missing-docstring
class HabitsDelete(HabitsChange):
DESCRIPTION = _("Delete a habit with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].delete()
def log_op(self, tid):
return _("Deleted habit {text}").format(**self.changing_tasks[tid]) # noqa: Q000
@Habits.subcommand('up') # pylint: disable=missing-docstring
class HabitsUp(HabitsChange):
DESCRIPTION = _("Up (+) a habit with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].score['up'].post()
def validate(self, task):
return task['up']
def log_op(self, tid):
return _("Incremented habit {text}").format(**self.changing_tasks[tid]) # noqa: Q000
@Habits.subcommand('down') # pylint: disable=missing-docstring
class HabitsDown(HabitsChange):
DESCRIPTION = _("Down (-) a habit with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].score['down'].post()
def validate(self, task):
return task['down']
def log_op(self, tid):
"""show a message to user on successful change of `tid`"""
return _("Decremented habit {text}").format(**self.changing_tasks[tid]) # noqa: Q000
class DailysChange(TasksChange): # pylint: disable=missing-docstring,abstract-method
domain = 'dailys'
def domain_print(self):
Dailys.invoke(config_filename=self.config_filename)
@Dailys.subcommand('done') # pylint: disable=missing-docstring
class DailysUp(DailysChange):
DESCRIPTION = _("Check a dayly with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].score['up'].post()
def log_op(self, tid):
return _("Completed daily {text}").format(**self.changing_tasks[tid]) # noqa: Q000
@Dailys.subcommand('undo') # pylint: disable=missing-docstring
class DailyDown(DailysChange):
DESCRIPTION = _("Uncheck a daily with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].score['down'].post()
def log_op(self, tid):
return _("Unchecked daily {text}").format(**self.changing_tasks[tid]) # noqa: Q000
class TodosChange(TasksChange): # pylint: disable=missing-docstring,abstract-method
domain = 'todos'
def domain_print(self):
ToDos.invoke(config_filename=self.config_filename)
@ToDos.subcommand('done') # pylint: disable=missing-docstring
class TodosUp(TodosChange):
DESCRIPTION = _("Check a todo with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].score['up'].post()
def log_op(self, tid):
return _("Completed todo {text}").format(**self.changing_tasks[tid]) # noqa: Q000
@ToDos.subcommand('delete') # pylint: disable=missing-docstring
class TodosDelete(TodosChange):
DESCRIPTION = _("Delete a todo with task_id") # noqa: Q000
def op(self, tid):
self.tasks[tid].delete()
def log_op(self, tid):
return _("Deleted todo {text}").format(**self.changing_tasks[tid]) # noqa: Q000
@ToDos.subcommand('add') # pylint: disable=missing-docstring
class TodosAdd(ApplicationWithApi):
DESCRIPTION = _("Add a todo <todo>") # noqa: Q000
priority = cli.SwitchAttr(
['-p', '--priority'],
cli.Set('0.1', '1', '1.5', '2'), default='1',
help=_("Priority (complexity) of a todo")) # noqa: Q000
def main(self, *todo: str):
todo_str = ' '.join(todo)
if not todo_str:
self.log.error(_("Empty todo text!")) # noqa: Q000
return 1
super().main()
self.api.tasks.user.post(type='todo', text=todo_str, priority=self.priority)
res = _("Added todo '{}' with priority {}").format(todo_str, self.priority) # noqa: Q000
print(prettify(res))
ToDos.invoke(config_filename=self.config_filename)
return 0
RewardId = TaskId
@Rewards.subcommand('buy') # pylint: disable=missing-docstring
class RewardsBuy(TasksChange):
DESCRIPTION = _("Buy a reward with reward_id") # noqa: Q000
domain = 'rewards'
ids_can_overlap = True
NO_TASK_ID = _("No reward_ids found!") # noqa: Q000
TASK_ID_INVALID = _("Reward id {} is invalid") # noqa: Q000
PARSED_TASK_IDS = _("Parsed reward ids {}") # noqa: Q000
def main(self, *reward_id: RewardId):
ApplicationWithApi.main(self)
self.more_tasks = get_additional_rewards(self.api)
super().main(*reward_id)
def op(self, tid):
t = self.changing_tasks[tid]
if t['type'] != 'rewards':
self.api.user.buy[t['key']].post()
else:
self.tasks[tid].score['up'].post()
def log_op(self, tid):
return _("Bought reward {text}").format(**self.changing_tasks[tid]) # noqa: Q000
def domain_print(self):
Rewards.invoke(config_filename=self.config_filename)
@Rewards.subcommand('add') # pylint: disable=missing-docstring
class RewardsAdd(ApplicationWithApi):
DESCRIPTION = _("Add a reward <reward>") # noqa: Q000
cost = cli.SwitchAttr(
['--cost'], default='10',
help=_("Cost of a reward (gp)")) # noqa: Q000
def main(self, *reward: str):
todo_str = ' '.join(reward)
if not todo_str:
self.log.error(_("Empty reward text!")) # noqa: Q000
return 1
super().main()
self.api.tasks.user.post(type='reward', text=todo_str, value=self.cost)
res = _("Added reward '{}' with cost {}").format(todo_str, self.cost) # noqa: Q000
print(prettify(res))
Rewards.invoke(config_filename=self.config_filename)
return 0
@HabiticaCli.subcommand('home') # pylint: disable=missing-docstring
class Home(ConfiguredApplication):
DESCRIPTION = _("Open habitica site in browser") # noqa: Q000
def main(self):
super().main()
from webbrowser import open_new_tab
HABITICA_TASKS_PAGE = '/#/tasks'
home_url = '{}{}'.format(self.config['url'], HABITICA_TASKS_PAGE)
print(_("Opening {}").format(home_url)) # noqa: Q000
open_new_tab(home_url)
@HabiticaCli.subcommand('server') # pylint: disable=missing-docstring
class Server(ApplicationWithApi):
DESCRIPTION = _("Check habitica server availability") # noqa: Q000
def main(self):
super().main()
try:
ret = self.api.status.get()
if isinstance(ret, dict) and ret['status'] == 'up':
print(_("Habitica server {} online").format(self.config['url'])) # noqa: Q000
return 0
except (KeyError, requests.exceptions.ConnectionError):
pass
msg = _("Habitica server {} offline or there is some issue with it") # noqa: Q000
print(msg.format(self.config['url']))
return -1
@HabiticaCli.subcommand('spells') # pylint: disable=missing-docstring
class Spells(ApplicationWithApi):
DESCRIPTION = _("Prints all available spells") # noqa: Q000
def main(self):
if self.nested_command:
return
super().main()
user = self.api.user.get()
content = get_content(self.api)
user_level = user['stats']['lvl']
if user_level < 10:
print(_("Your level is too low. Come back on level 10 or higher")) # noqa: Q000
user_class = user['stats']['class']
user_spells = [
v for k, v in content['spells'][user_class].items()
if user_level > v['lvl']
]
print(_("You are a {} of level {}").format(_(user_class), user_level)) # noqa: Q000
for spell in sorted(user_spells, key=lambda x: x['lvl']):
msg = _("[{key}] {text} ({mana}:droplet:) - {notes}").format(**spell) # noqa: Q000
print(msg)
subcommands_file = local.path(SUBCOMMANDS_JSON)
if subcommands_file.exists():
try:
with open(subcommands_file) as subcommands_file_obj:
subcommands = json.load(subcommands_file_obj)
del subcommands_file_obj
for name, module in subcommands.items():
HabiticaCli.subcommand(name, module)
del name
del module
del subcommands
except (AttributeError, JSONDecodeError) as error:
warnings.warn('subcommands.json found, but it is invalid: {}'.format(error))
del error
del subcommands_file
if __name__ == '__main__':
HabiticaCli.run()
|
ASMfreaK/habitipy | habitipy/cli.py | get_additional_rewards | python | def get_additional_rewards(api):
c = get_content(api)
tasks = [c[i] for i in ['potion', 'armoire']]
tasks.extend(api.user.inventory.buy.get())
for task in tasks:
task['id'] = task['alias'] = task['key']
return tasks | returns list of non-user rewards (potion, armoire, gear) | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/cli.py#L365-L372 | [
"def get_content(api, rebuild_cache=False):\n \"\"\"get content from server or cache\"\"\"\n if hasattr(get_content, 'cache') and not rebuild_cache:\n return get_content.cache\n if not os.path.exists(CONTENT_JSON) or rebuild_cache:\n import locale\n content_endpoint = api.content.get\n... | """
habitipy - tools and library for Habitica restful API
command-line interface library using plumbum
"""
# pylint: disable=arguments-differ, attribute-defined-outside-init,ungrouped-imports
# pylint: disable=invalid-name, logging-format-interpolation,too-few-public-methods
import warnings
import logging
import os
import json
import uuid
from bisect import bisect
from textwrap import dedent
from typing import List, Union, Dict, Any # pylint: disable=unused-import
import pkg_resources
from plumbum import local, cli, colors
import requests
from .api import Habitipy
from .util import assert_secure_file, secure_filestore
from .util import get_translation_functions, get_translation_for
from .util import prettify
try:
from json import JSONDecodeError # type: ignore
except ImportError:
JSONDecodeError = ValueError # type: ignore
DEFAULT_CONF = '~/.config/habitipy/config'
SUBCOMMANDS_JSON = '~/.config/habitipy/subcommands.json'
CONTENT_JSON = local.path('~/.config/habitipy/content.json')
_, ngettext = get_translation_functions('habitipy', names=('gettext', 'ngettext'))
CLASSES = [_("warrior"), _("rogue"), _("wizard"), _("healer")] # noqa: Q000
YES_ANSWERS = ('yes', 'y', 'true', 'True', '1')
CHECK_MARK_STYLES = ('wide', 'narrow', 'ascii')
CHECK = {
'wide': colors.green | '✔ ',
'narrow': colors.green | '✔',
'ascii': '[X]'
}
UNCHECK = {
'wide': colors.red | '✖ ',
'narrow': colors.red | '✖',
'ascii': '[ ]'
}
def is_uuid(u):
"""validator for plumbum prompt"""
if isinstance(u, str) and u.replace('-', '') == uuid.UUID(u).hex:
return u
return False
def load_conf(configfile, config=None):
"""Get authentication data from the AUTH_CONF file."""
default_login = 'your-login-for-api-here'
default_password = 'your-password-for-api-here'
config = config or {}
configfile = local.path(configfile)
if not configfile.exists():
configfile.dirname.mkdir()
else:
assert_secure_file(configfile)
with secure_filestore(), cli.Config(configfile) as conf:
config['url'] = conf.get('habitipy.url', 'https://habitica.com')
config['login'] = conf.get('habitipy.login', default_login)
config['password'] = conf.get('habitipy.password', default_password)
if config['login'] == default_login or config['password'] == default_password:
if cli.terminal.ask(
_("""Your creditentials are invalid. Do you want to enter them now?"""),
default=True):
msg = _("""
You can get your login information at
https://habitica.com/#/options/settings/api
Both your user id and API token should look like this:
xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
where 'x' is a number between 0-9 or a character a-f.
""")
print(dedent(msg))
msg = _("""Please enter your login (user ID)""")
config['login'] = cli.terminal.prompt(msg, validator=is_uuid)
msg = _("""Please enter your password (API token)""")
config['password'] = cli.terminal.prompt(msg, validator=is_uuid)
conf.set('habitipy.login', config['login'])
conf.set('habitipy.password', config['password'])
print(dedent(_("""
Your creditentials are securely stored in
{configfile}
You can edit that file later if you need.
""")).format(configfile=configfile))
config['show_numbers'] = conf.get('habitipy.show_numbers', 'y')
config['show_numbers'] = config['show_numbers'] in YES_ANSWERS
config['show_style'] = conf.get('habitipy.show_style', 'wide')
if config['show_style'] not in CHECK_MARK_STYLES:
config['show_style'] = 'wide'
return config
class ConfiguredApplication(cli.Application):
"""Application with config"""
config_filename = cli.SwitchAttr(
['-c', '--config'], argtype=local.path, default=DEFAULT_CONF,
argname='CONFIG',
help=_("Use file CONFIG for config")) # noqa: Q000
verbose = cli.Flag(
['-v', '--verbose'],
help=_("Verbose output - log everything."), # noqa: Q000
excludes=['-s', '--silent'])
silence_level = cli.CountOf(
['-s', '--silent'],
help=_("Make program more silent"), # noqa: Q000
excludes=['-v', '--verbose'])
def main(self):
self.config = load_conf(self.config_filename)
self.log = logging.getLogger(str(self.__class__).split("'")[1])
self.log.addHandler(logging.StreamHandler())
if self.verbose:
self.log.setLevel(logging.DEBUG)
else:
base_level = logging.INFO
self.log.setLevel(base_level + 10 * self.silence_level)
def get_content(api, rebuild_cache=False):
"""get content from server or cache"""
if hasattr(get_content, 'cache') and not rebuild_cache:
return get_content.cache
if not os.path.exists(CONTENT_JSON) or rebuild_cache:
import locale
content_endpoint = api.content.get
# pylint: disable=protected-access
try_langs = []
try:
lang = get_translation_for('habitipy').info()['language']
try_langs.append(lang)
except KeyError:
pass
try:
loc = locale.getdefaultlocale()[0]
if loc:
try_langs.append(loc)
try_langs.append(loc[:2])
except IndexError:
pass
server_lang = content_endpoint._node.params['query']['language']
# handle something like 'ru_RU' not available - only 'ru'
for lang in try_langs:
if lang in server_lang.possible_values:
loc = {'language': lang}
break
else:
loc = {}
get_content.cache = content = content_endpoint(**loc)
with open(CONTENT_JSON, 'w') as f:
json.dump(content, f)
return content
try:
with open(CONTENT_JSON) as f:
get_content.cache = content = json.load(f)
return content
except JSONDecodeError:
return get_content(api, rebuild_cache=True)
class ApplicationWithApi(ConfiguredApplication):
"""Application with configured Habitica API"""
api = None # type: Habitipy
def main(self):
super().main()
self.api = Habitipy(self.config)
class HabiticaCli(ConfiguredApplication): # pylint: disable=missing-docstring
DESCRIPTION = _("tools and library for Habitica restful API") # noqa: Q000
VERSION = pkg_resources.get_distribution('habitipy').version
def main(self):
if self.nested_command:
return
super().main()
self.log.error(_("No subcommand given, exiting")) # noqa: Q000
@HabiticaCli.subcommand('status') # pylint: disable=missing-docstring
class Status(ApplicationWithApi):
DESCRIPTION = _("Show HP, XP, GP, and more") # noqa: Q000
def main(self):
super().main()
user = self.api.user.get()
for key in ['hp', 'mp', 'exp']:
user['stats'][key] = round(user['stats'][key])
user['stats']['class'] = _(user['stats']['class']).capitalize()
user['food'] = sum(user['items']['food'].values())
content = get_content(self.api)
user['pet'] = user['items']['currentPet'] if 'currentPet' in user['items'] else None
user['pet'] = content['petInfo'][user['pet']]['text'] if user['pet'] else ''
user['pet'] = _("Pet: ") + user['pet'] if user['pet'] else _("No pet") # noqa: Q000
user['mount'] = user['items'].get('currentMount', None)
user['mount'] = content['mountInfo'][user['mount']]['text'] if user['mount'] else ''
if user['mount']:
user['mount'] = _("Mount: ") + user['mount'] # noqa: Q000
else:
user['mount'] = _("No mount") # noqa: Q000
level = _("\nLevel {stats[lvl]} {stats[class]}\n").format(**user) # noqa: Q000
highlight = '-' * (len(level) - 2)
level = highlight + level + highlight
result = [
level,
colors.red | _("Health: {stats[hp]}/{stats[maxHealth]}"), # noqa: Q000
colors.yellow | _("XP: {stats[exp]}/{stats[toNextLevel]}"), # noqa: Q000
colors.blue | _("Mana: {stats[mp]}/{stats[maxMP]}"), # noqa: Q000
colors.light_yellow | _("GP: {stats[gp]:.2f}"), # noqa: Q000
'{pet} ' + ngettext(
"({food} food item)", # noqa: Q000
"({food} food items)", # noqa: Q000
user['food']),
'{mount}']
quest = self.quest_info(user)
if quest:
result.append(quest)
print('\n'.join(result).format(**user))
def quest_info(self, user):
"""Get current quest info or return None"""
key = user['party']['quest'].get('key', None)
if '_id' not in user['party'] or key is None:
return None
for refresh in False, True:
content = get_content(self.api, refresh)
quest = content['quests'].get(key, None)
if quest:
break
else:
self.log.warning(dedent(_(
"""Quest {} not found in Habitica's content.
Please file an issue to https://github.com/ASMfreaK/habitipy/issues
""")).format(key))
return None
for quest_type, quest_template in (
('collect', _("""
Quest: {quest[text]} (collect-type)
{user[party][quest][progress][collectedItems]} quest items collected
""")),
('boss', _("""
Quest: {quest[text]} (boss)
{user[party][quest][progress][up]:.1f} damage will be dealt to {quest[boss][name]}
"""))):
if quest_type in quest:
try:
return dedent(quest_template.format(quest=quest, user=user))[1:-1]
except KeyError:
self.log.warning(dedent(_(
"""Something went wrong when formatting quest {}.
Please file an issue to https://github.com/ASMfreaK/habitipy/issues
""")).format(key))
return None
self.log.warning(dedent(_(
"""Quest {} isn't neither a collect-type or a boss-type.
Please file an issue to https://github.com/ASMfreaK/habitipy/issues
""")).format(key))
class ScoreInfo:
"""task value/score info: http://habitica.wikia.com/wiki/Task_Value"""
scores = {
'wide': ['▁', '▂', '▃', '▄', '▅', '▆', '▇'],
'narrow': ['▁', '▂', '▃', '▄', '▅', '▆', '▇'],
'ascii': ['*', '**', '***', '****', '*****', '******', '*******']
}
colors_ = ['Red3', 'Red1', 'DarkOrange', 'Gold3A', 'Green', 'LightCyan3', 'Cyan1']
breakpoints = [-20, -10, -1, 1, 5, 10]
def __new__(cls, style, value):
index = bisect(cls.breakpoints, value)
score = cls.scores[style][index]
score_col = colors.fg(cls.colors_[index])
if style == 'ascii':
max_scores_len = max(map(len, cls.scores[style]))
score = '[' + score.center(max_scores_len) + ']'
# score = '⎡' + score.center(cls.max_scores_len) + '⎤'
return score_col | score
@classmethod
def color(cls, value):
"""task value/score color"""
index = bisect(cls.breakpoints, value)
return colors.fg(cls.colors_[index])
class TasksPrint(ApplicationWithApi):
"""Put all tasks from `domain` to print"""
domain = '' # type: str
more_tasks = [] # type: List[Dict[str, Any]]
def domain_format(self, task):
"""format task for domain"""
raise NotImplementedError()
def main(self):
if self.nested_command:
return
super().main()
tasks = self.api.tasks.user.get(type=self.domain)
tasks.extend(self.more_tasks)
habits_len = len(tasks)
ident_size = len(str(habits_len)) + 2
number_format = '{{:{}d}}. '.format(ident_size - 2)
for i, task in enumerate(tasks):
i = number_format.format(i + 1) if self.config['show_numbers'] else ''
res = i + prettify(self.domain_format(task))
print(res)
@HabiticaCli.subcommand('habits')  # pylint: disable=missing-docstring
class Habits(TasksPrint):
    DESCRIPTION = _("List, up and down habit tasks")  # noqa: Q000
    domain = 'habits'
    def domain_format(self, habit):
        """Render one habit as a colored score glyph followed by its text."""
        score = ScoreInfo(self.config['show_style'], habit['value'])
        return _("{0} {text}").format(score, **habit)  # noqa: Q000
@HabiticaCli.subcommand('dailies')  # pylint: disable=missing-docstring
class Dailys(TasksPrint):
    DESCRIPTION = _("List, check, uncheck daily tasks")  # noqa: Q000
    domain = 'dailys'
    def domain_format(self, daily):
        """Render one daily: check mark, score glyph, text, checklist tally."""
        style = self.config['show_style']
        score = ScoreInfo(style, daily['value'])
        mark = (CHECK if daily['completed'] else UNCHECK)[style]
        items = daily['checklist']
        if items:
            done = sum(1 for item in items if item['completed'])
            tally = ' {}/{}'.format(done, len(items))
        else:
            tally = ''
        res = _("{0}{1}{text}{2}").format(mark, score, tally, **daily)  # noqa: Q000
        if not daily['isDue']:
            # gray out and strike through dailies that are not due today
            res = colors.strikeout + colors.dark_gray | res
        return res
@HabiticaCli.subcommand('todos')  # pylint: disable=missing-docstring
class ToDos(TasksPrint):
    # fixed typo: "comlete" -> "complete" in the user-visible description
    DESCRIPTION = _("List, complete, add or delete todo tasks")  # noqa: Q000
    domain = 'todos'
    def domain_format(self, todo):
        """Render one todo: score glyph, check mark, text, checklist tally."""
        score = ScoreInfo(self.config['show_style'], todo['value'])
        check = CHECK if todo['completed'] else UNCHECK
        check = check[self.config['show_style']]
        checklist_done = len(list(filter(lambda x: x['completed'], todo['checklist'])))
        checklist = \
            ' {}/{}'.format(
                checklist_done,
                len(todo['checklist'])
            ) if todo['checklist'] else ''
        # NOTE(review): '{1}{0}' puts the score before the check mark, the
        # opposite of Dailys.domain_format -- confirm this order is intended
        res = _("{1}{0}{text}{2}").format(check, score, checklist, **todo)  # noqa: Q000
        return res
@HabiticaCli.subcommand('rewards')  # pylint: disable=missing-docstring
class Rewards(TasksPrint):
    DESCRIPTION = _("List, buy and add rewards")  # noqa: Q000
    domain = 'rewards'
    def main(self):
        if self.nested_command:
            return
        # call ApplicationWithApi.main directly so self.api exists before
        # fetching the extra (in-game) rewards; NOTE(review): super().main()
        # below runs it again through TasksPrint -- presumably idempotent,
        # confirm
        ApplicationWithApi.main(self)
        self.more_tasks = get_additional_rewards(self.api)
        super().main()
    def domain_format(self, reward):
        """Render one reward as its gold price (yellow) and text."""
        score = colors.yellow | _("{value} gp").format(**reward)  # noqa: Q000
        return _("{} {text}").format(score, **reward)  # noqa: Q000
class TaskId(List[Union[str, int]]):
    """
    handle task-id formats such as:
        habitica todos done 3 taskalias_or_uuid
        habitica todos done 1,2,3,taskalias_or_uuid
        habitica todos done 2 3
        habitica todos done 1-3,4 8
    """
    def __new__(cls, tids: str):
        # each comma-separated piece is a number, a low-high range, or
        # (when it fails to parse as int) a task alias / uuid string
        parsed = []  # type: List[Union[str, int]]
        for piece in tids.split(','):
            try:
                if '-' in piece:
                    low, high = [int(part) for part in piece.split('-')]
                    parsed.extend(range(low, high + 1))
                else:
                    parsed.append(int(piece))
            except ValueError:
                parsed.append(piece)
        # user-facing ids are 1-based; shift numeric ids to 0-based indices
        return [item - 1 if isinstance(item, int) else item for item in parsed]  # type: ignore
class TasksChange(ApplicationWithApi):
    """find all tasks specified by user and do self.op on them

    Resolves user-supplied ids (1-based numbers, uuids or aliases) against
    the task list for `domain`, then applies `op` to each resolved task.
    """
    # REST task type this command operates on
    domain = ''  # type: str
    noop = cli.Flag(
        ['--dry-run', '--noop'],
        help=_("If passed, won't actually change anything on habitipy server"),  # noqa: Q000
        default=False)
    # extra task-like dicts appended after the API result
    more_tasks = []  # type: List[Dict[str, Any]]
    # when True, applying the same id twice is meaningful (e.g. habit +1 +1)
    ids_can_overlap = False
    NO_TASK_ID = _("No task_ids found!")  # noqa: Q000
    TASK_ID_INVALID = _("Task id {} is invalid")  # noqa: Q000
    PARSED_TASK_IDS = _("Parsed task ids {}")  # noqa: Q000
    def main(self, *task_ids: TaskId):  # type: ignore
        super().main()
        # flatten the per-argument id lists into one list
        task_id = []  # type: List[Union[str,int]]
        for tids in task_ids:
            task_id.extend(tids)
        if not task_id:
            self.log.error(self.NO_TASK_ID)
            return 1
        tasks = self.api.tasks.user.get(type=self.domain)
        assert isinstance(tasks, list)
        tasks.extend(self.more_tasks)
        task_uuids = [task['id'] for task in tasks]
        num_tasks = len(tasks)
        aliases = {task['alias']: task for task in tasks if 'alias' in task}
        self.changing_tasks = {}  # type: Dict[Union[str], Dict[str, Any]]
        changing_tasks_ids = []  # type: List[str]
        for tid in task_id:
            if isinstance(tid, int):
                # tid is already a 0-based index (TaskId shifted it), so the
                # valid range is [0, num_tasks); the previous `<= num_tasks`
                # allowed tid == num_tasks and raised IndexError below
                if 0 <= tid < num_tasks:
                    changing_tasks_ids.append(task_uuids[tid])
                    self.changing_tasks[task_uuids[tid]] = tasks[tid]
                    continue
            elif isinstance(tid, str):
                if tid in task_uuids:
                    changing_tasks_ids.append(tid)
                    self.changing_tasks[tid] = tasks[task_uuids.index(tid)]
                    continue
                elif tid in aliases:
                    t_id = aliases[tid]['id']
                    changing_tasks_ids.append(t_id)
                    self.changing_tasks[t_id] = aliases[tid]
                    continue
            self.log.error(self.TASK_ID_INVALID.format(tid))
            return 1
        idstr = ' '.join(self.changing_tasks.keys())
        self.log.info(self.PARSED_TASK_IDS.format(idstr))  # noqa: Q000
        self.tasks = self.api.tasks
        if not self.ids_can_overlap:
            # deduplicate so a task is changed at most once
            changing_tasks_ids = list(set(changing_tasks_ids))
        for tid in changing_tasks_ids:
            if not self.noop:
                self.op(tid)
            res = self.log_op(tid)
            print(prettify(res))
        self.domain_print()
    def validate(self, task):  # pylint: disable=no-self-use,unused-argument
        """check if task is valid for the operation

        NOTE(review): not called from `main` in this file -- confirm whether
        subclasses' validate overrides are actually exercised anywhere.
        """
        return True
    def op(self, tid):
        """operation to be done on task with `tid`"""
        raise NotImplementedError
    def log_op(self, tid):
        """return a message to show user on successful change of `tid`"""
        raise NotImplementedError
    def domain_print(self):
        """show domain to user again"""
        raise NotImplementedError
class HabitsChange(TasksChange):  # pylint: disable=missing-docstring,abstract-method
    domain = 'habits'
    # the same habit may legitimately be scored multiple times in one run
    ids_can_overlap = True
    def domain_print(self):
        # re-list habits so the user sees the updated state
        Habits.invoke(config_filename=self.config_filename)
@Habits.subcommand('add')  # pylint: disable=missing-docstring
class HabitsAdd(ApplicationWithApi):
    DESCRIPTION = _("Add a habit <habit>")  # noqa: Q000
    priority = cli.SwitchAttr(
        ['-p', '--priority'],
        cli.Set('0.1', '1', '1.5', '2'), default='1',
        help=_("Priority (complexity) of a habit"))  # noqa: Q000
    direction = cli.SwitchAttr(
        ['-d', '--direction'],
        cli.Set('positive', 'negative', 'both'), default='both',
        help=_("positive/negative/both"))  # noqa: Q000
    def main(self, *habit: str):
        """Create a habit from the remaining CLI words.

        Returns 1 on empty text, None on success.
        """
        habit_str = ' '.join(habit)
        if not habit_str:
            self.log.error(_("Empty habit text!"))  # noqa: Q000
            return 1
        super().main()
        # 'both' enables both up and down; the other two disable one side
        self.api.tasks.user.post(
            type='habit', text=habit_str,
            priority=self.priority, up=(self.direction != 'negative'),
            down=(self.direction != 'positive'))
        res = _("Added habit '{}' with priority {} and direction {}").format(  # noqa: Q000
            habit_str, self.priority, self.direction)
        print(prettify(res))
        Habits.invoke(config_filename=self.config_filename)
        return None
@Habits.subcommand('delete')  # pylint: disable=missing-docstring
class HabitsDelete(HabitsChange):
    DESCRIPTION = _("Delete a habit with task_id")  # noqa: Q000
    def op(self, tid):
        # DELETE the task on the server
        self.tasks[tid].delete()
    def log_op(self, tid):
        return _("Deleted habit {text}").format(**self.changing_tasks[tid])  # noqa: Q000
@Habits.subcommand('up')  # pylint: disable=missing-docstring
class HabitsUp(HabitsChange):
    DESCRIPTION = _("Up (+) a habit with task_id")  # noqa: Q000
    def op(self, tid):
        # score the habit positively
        self.tasks[tid].score['up'].post()
    def validate(self, task):
        # NOTE(review): validate is not called from TasksChange.main in this
        # file -- confirm whether up-only habits are actually filtered
        return task['up']
    def log_op(self, tid):
        return _("Incremented habit {text}").format(**self.changing_tasks[tid])  # noqa: Q000
@Habits.subcommand('down')  # pylint: disable=missing-docstring
class HabitsDown(HabitsChange):
    DESCRIPTION = _("Down (-) a habit with task_id")  # noqa: Q000
    def op(self, tid):
        # score the habit negatively
        self.tasks[tid].score['down'].post()
    def validate(self, task):
        # NOTE(review): see HabitsUp.validate -- apparently unused
        return task['down']
    def log_op(self, tid):
        """show a message to user on successful change of `tid`"""
        return _("Decremented habit {text}").format(**self.changing_tasks[tid])  # noqa: Q000
class DailysChange(TasksChange):  # pylint: disable=missing-docstring,abstract-method
    domain = 'dailys'
    def domain_print(self):
        # re-list dailies so the user sees the updated state
        Dailys.invoke(config_filename=self.config_filename)
@Dailys.subcommand('done')  # pylint: disable=missing-docstring
class DailysUp(DailysChange):
    DESCRIPTION = _("Check a dayly with task_id")  # noqa: Q000
    def op(self, tid):
        # mark the daily as completed
        self.tasks[tid].score['up'].post()
    def log_op(self, tid):
        return _("Completed daily {text}").format(**self.changing_tasks[tid])  # noqa: Q000
@Dailys.subcommand('undo')  # pylint: disable=missing-docstring
class DailyDown(DailysChange):
    DESCRIPTION = _("Uncheck a daily with task_id")  # noqa: Q000
    def op(self, tid):
        # revert the daily to not-completed
        self.tasks[tid].score['down'].post()
    def log_op(self, tid):
        return _("Unchecked daily {text}").format(**self.changing_tasks[tid])  # noqa: Q000
class TodosChange(TasksChange):  # pylint: disable=missing-docstring,abstract-method
    domain = 'todos'
    def domain_print(self):
        # re-list todos so the user sees the updated state
        ToDos.invoke(config_filename=self.config_filename)
@ToDos.subcommand('done')  # pylint: disable=missing-docstring
class TodosUp(TodosChange):
    DESCRIPTION = _("Check a todo with task_id")  # noqa: Q000
    def op(self, tid):
        # mark the todo as completed
        self.tasks[tid].score['up'].post()
    def log_op(self, tid):
        return _("Completed todo {text}").format(**self.changing_tasks[tid])  # noqa: Q000
@ToDos.subcommand('delete')  # pylint: disable=missing-docstring
class TodosDelete(TodosChange):
    DESCRIPTION = _("Delete a todo with task_id")  # noqa: Q000
    def op(self, tid):
        # DELETE the task on the server
        self.tasks[tid].delete()
    def log_op(self, tid):
        return _("Deleted todo {text}").format(**self.changing_tasks[tid])  # noqa: Q000
@ToDos.subcommand('add')  # pylint: disable=missing-docstring
class TodosAdd(ApplicationWithApi):
    DESCRIPTION = _("Add a todo <todo>")  # noqa: Q000
    priority = cli.SwitchAttr(
        ['-p', '--priority'],
        cli.Set('0.1', '1', '1.5', '2'), default='1',
        help=_("Priority (complexity) of a todo"))  # noqa: Q000
    def main(self, *todo: str):
        """Create a todo from the remaining CLI words.

        Returns 1 on empty text, 0 on success.
        """
        todo_str = ' '.join(todo)
        if not todo_str:
            self.log.error(_("Empty todo text!"))  # noqa: Q000
            return 1
        super().main()
        self.api.tasks.user.post(type='todo', text=todo_str, priority=self.priority)
        res = _("Added todo '{}' with priority {}").format(todo_str, self.priority)  # noqa: Q000
        print(prettify(res))
        ToDos.invoke(config_filename=self.config_filename)
        return 0
# reward ids are parsed with exactly the same rules as task ids
RewardId = TaskId
@Rewards.subcommand('buy')  # pylint: disable=missing-docstring
class RewardsBuy(TasksChange):
    DESCRIPTION = _("Buy a reward with reward_id")  # noqa: Q000
    domain = 'rewards'
    # buying the same reward repeatedly in one invocation is allowed
    ids_can_overlap = True
    NO_TASK_ID = _("No reward_ids found!")  # noqa: Q000
    TASK_ID_INVALID = _("Reward id {} is invalid")  # noqa: Q000
    PARSED_TASK_IDS = _("Parsed reward ids {}")  # noqa: Q000
    def main(self, *reward_id: RewardId):
        # initialize self.api first so the extra in-game rewards can be
        # fetched before TasksChange.main resolves the ids
        ApplicationWithApi.main(self)
        self.more_tasks = get_additional_rewards(self.api)
        super().main(*reward_id)
    def op(self, tid):
        t = self.changing_tasks[tid]
        # NOTE(review): Habitica task objects use the singular 'reward' as
        # their type, so this comparison against 'rewards' looks suspicious;
        # whether it is correct depends on the type get_additional_rewards
        # assigns to its items -- confirm against that helper
        if t['type'] != 'rewards':
            self.api.user.buy[t['key']].post()
        else:
            # a user-created reward task: "buying" it is scoring it up
            self.tasks[tid].score['up'].post()
    def log_op(self, tid):
        return _("Bought reward {text}").format(**self.changing_tasks[tid])  # noqa: Q000
    def domain_print(self):
        Rewards.invoke(config_filename=self.config_filename)
@Rewards.subcommand('add')  # pylint: disable=missing-docstring
class RewardsAdd(ApplicationWithApi):
    DESCRIPTION = _("Add a reward <reward>")  # noqa: Q000
    cost = cli.SwitchAttr(
        ['--cost'], default='10',
        help=_("Cost of a reward (gp)"))  # noqa: Q000
    def main(self, *reward: str):
        """Create a custom reward from the remaining CLI words.

        Returns 1 on empty text, 0 on success.
        """
        todo_str = ' '.join(reward)
        if not todo_str:
            self.log.error(_("Empty reward text!"))  # noqa: Q000
            return 1
        super().main()
        self.api.tasks.user.post(type='reward', text=todo_str, value=self.cost)
        res = _("Added reward '{}' with cost {}").format(todo_str, self.cost)  # noqa: Q000
        print(prettify(res))
        Rewards.invoke(config_filename=self.config_filename)
        return 0
@HabiticaCli.subcommand('home')  # pylint: disable=missing-docstring
class Home(ConfiguredApplication):
    DESCRIPTION = _("Open habitica site in browser")  # noqa: Q000
    def main(self):
        """Open the configured Habitica instance's task page in a browser."""
        super().main()
        from webbrowser import open_new_tab
        tasks_page = '/#/tasks'
        home_url = self.config['url'] + tasks_page
        print(_("Opening {}").format(home_url))  # noqa: Q000
        open_new_tab(home_url)
@HabiticaCli.subcommand('server')  # pylint: disable=missing-docstring
class Server(ApplicationWithApi):
    DESCRIPTION = _("Check habitica server availability")  # noqa: Q000
    def main(self):
        """Ping the configured server; return 0 when up, -1 otherwise."""
        super().main()
        try:
            ret = self.api.status.get()
            if isinstance(ret, dict) and ret['status'] == 'up':
                print(_("Habitica server {} online").format(self.config['url']))  # noqa: Q000
                return 0
        except (KeyError, requests.exceptions.ConnectionError):
            # unreachable server or unexpected payload: fall through to the
            # offline message below
            pass
        msg = _("Habitica server {} offline or there is some issue with it")  # noqa: Q000
        print(msg.format(self.config['url']))
        return -1
@HabiticaCli.subcommand('spells')  # pylint: disable=missing-docstring
class Spells(ApplicationWithApi):
    DESCRIPTION = _("Prints all available spells")  # noqa: Q000
    def main(self):
        """List the class skills the user has unlocked."""
        if self.nested_command:
            return
        super().main()
        user = self.api.user.get()
        content = get_content(self.api)
        user_level = user['stats']['lvl']
        if user_level < 10:
            print(_("Your level is too low. Come back on level 10 or higher"))  # noqa: Q000
            # below level 10 there is no class selection, so stop here
            # instead of falling through and printing an empty spell list
            return
        user_class = user['stats']['class']
        # NOTE(review): strict '>' hides a spell at exactly its unlock
        # level -- confirm whether this should be '>='
        user_spells = [
            v for k, v in content['spells'][user_class].items()
            if user_level > v['lvl']
        ]
        print(_("You are a {} of level {}").format(_(user_class), user_level))  # noqa: Q000
        for spell in sorted(user_spells, key=lambda x: x['lvl']):
            msg = _("[{key}] {text} ({mana}:droplet:) - {notes}").format(**spell)  # noqa: Q000
            print(msg)
# optionally extend the CLI with extra subcommands declared in a
# user-provided subcommands.json ({name: module} mapping)
subcommands_file = local.path(SUBCOMMANDS_JSON)
if subcommands_file.exists():
    try:
        with open(subcommands_file) as subcommands_file_obj:
            subcommands = json.load(subcommands_file_obj)
        del subcommands_file_obj
        for name, module in subcommands.items():
            HabiticaCli.subcommand(name, module)
            # clean the loop names inside the body: the original deleted
            # them after the loop, which raised an uncaught NameError
            # whenever the mapping was empty (names never bound)
            del name
            del module
        del subcommands
    except (AttributeError, JSONDecodeError) as error:
        warnings.warn('subcommands.json found, but it is invalid: {}'.format(error))
        del error
    del subcommands_file
if __name__ == '__main__':
    HabiticaCli.run()
|
ASMfreaK/habitipy | habitipy/cli.py | ScoreInfo.color | python | def color(cls, value):
index = bisect(cls.breakpoints, value)
return colors.fg(cls.colors_[index]) | task value/score color | train | https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/cli.py#L289-L292 | null | class ScoreInfo:
"""task value/score info: http://habitica.wikia.com/wiki/Task_Value"""
scores = {
'wide': ['▁', '▂', '▃', '▄', '▅', '▆', '▇'],
'narrow': ['▁', '▂', '▃', '▄', '▅', '▆', '▇'],
'ascii': ['*', '**', '***', '****', '*****', '******', '*******']
}
colors_ = ['Red3', 'Red1', 'DarkOrange', 'Gold3A', 'Green', 'LightCyan3', 'Cyan1']
breakpoints = [-20, -10, -1, 1, 5, 10]
def __new__(cls, style, value):
index = bisect(cls.breakpoints, value)
score = cls.scores[style][index]
score_col = colors.fg(cls.colors_[index])
if style == 'ascii':
max_scores_len = max(map(len, cls.scores[style]))
score = '[' + score.center(max_scores_len) + ']'
# score = '⎡' + score.center(cls.max_scores_len) + '⎤'
return score_col | score
@classmethod
|
debrouwere/google-analytics | googleanalytics/columns.py | ColumnList.serialize | python | def serialize(self, value, greedy=True):
if greedy and not isinstance(value, Column):
value = self.normalize(value)
if isinstance(value, Column):
return value.id
else:
return value | Greedy serialization requires the value to either be a column
or convertible to a column, whereas non-greedy serialization
will pass through any string as-is and will only serialize
Column objects.
Non-greedy serialization is useful when preparing queries with
custom filters or segments. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/columns.py#L237-L254 | null | class ColumnList(addressable.List):
COLUMN_TYPE = Column
def __init__(self, columns, **options):
options['items'] = columns
options['name'] = self.COLUMN_TYPE.__class__.__name__
options['indices'] = ('name', 'id', 'slug', 'python_slug')
options['insensitive'] = True
super(ColumnList, self).__init__(**options)
@utils.vectorize
def normalize(self, value):
if isinstance(value, self.COLUMN_TYPE):
return value
else:
return self[value]
@utils.vectorize
|
debrouwere/google-analytics | googleanalytics/query.py | describe | python | def describe(profile, description):
api_type = description.pop('type', 'core')
api = getattr(profile, api_type)
return refine(api.query, description) | Generate a query by describing it as a series of actions
and parameters to those actions. These map directly
to Query methods and arguments to those methods.
This is an alternative to the chaining interface.
Mostly useful if you'd like to put your queries
in a file, rather than in Python code. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L1093-L1105 | [
"def refine(query, description):\n \"\"\"\n Refine a query from a dictionary of parameters that describes it.\n See `describe` for more information.\n \"\"\"\n\n for attribute, arguments in description.items():\n if hasattr(query, attribute):\n attribute = getattr(query, attribute)\... | # encoding: utf-8
"""
"""
import collections
import csv
from datetime import datetime
import hashlib
import json
import time
from copy import deepcopy
from functools import partial
import addressable
import inspector
import yaml
from dateutil.relativedelta import relativedelta
import prettytable
from . import errors, utils
from .columns import Column, ColumnList, Segment
INTERVAL_TIMEDELTAS = {
'year': dict(years=1),
'year_month': dict(months=1),
'year_week': dict(weeks=1),
'date': dict(days=1),
'date_hour': dict(hours=1),
}
def path(l, *keys):
    """Build a nested-dict index of `l`, one level per attribute in `keys`.

    The innermost level maps the final attribute's value to the first
    element of `l` carrying that value (later duplicates are ignored).
    """
    tree = {}
    for element in l:
        node = tree
        for key in keys[:-1]:
            node = node.setdefault(getattr(element, key), {})
        node.setdefault(getattr(element, keys[-1]), element)
    return tree
def default(metric):
    """Fill-in value for a missing metric: averages have no meaningful
    zero, so they default to None; plain counts default to 0."""
    return None if 'avg' in metric else 0
class Report(object):
"""
Executing a query will return a report, which contains the requested data.
Queries are executed and turned into a report lazily, whenever data is requested.
You can also explicitly generate a report from a query by using the `Query#get` method.
```python
# will return a query object
profile.core.query.metrics('pageviews').range('yesterday')
# will return a report object
profile.core.query.metrics('pageviews').range('yesterday').get()
# will generate a report object and return its rows -- these
# two are equivalent
profile.core.query.metrics('pageviews').range('yesterday').rows
profile.core.query.metrics('pageviews').range('yesterday').get().rows
```
You can access the data in a Report object both rowwise and columnwise.
```python
report = query.metrics('pageviews', 'sessions').range('yesterday')
# first ten rows
report.rows[:10]
# work with just session data points
report['sessions'][:10]
report.rows[:10]['sessions']
```
For simple data structures, there are also some shortcuts.
These shortcuts are available both directly on Report objects
and lazily-loaded via Query objects.
```python
# reports with a single value
query = profile.core.query('pageviews').range('yesterday')
report = query.get()
assert query.value == report.value
# reports with a single metric
profile.core.query('pageviews').daily('yesterday', days=-10).values
# reports with a single result
query = profile.core.query(['pageviews', 'sessions']).range('yesterday')
assert query.first == query.last
```
"""
def __init__(self, raw, query):
self.raw = []
self.queries = []
all_columns = query.api.all_columns
report_columns = [column['name'] for column in raw['columnHeaders']]
self.columns = ColumnList([all_columns[column] for column in report_columns])
self.metrics = addressable.filter(lambda column: column.type == 'metric', self.columns)
self.dimensions = addressable.filter(lambda column: column.type == 'dimension', self.columns)
time_columns = ['date_hour', 'date', 'year_week', 'year_month', 'year']
try:
self.granularity = next(column for column in self.dimensions if column.python_slug in time_columns)
except StopIteration:
self.granularity = None
slugs = [column.python_slug for column in self.columns]
self.Row = collections.namedtuple('Row', slugs)
self.rows = []
self.append(raw, query)
self.since = self.until = None
if 'start-date' in raw['query']:
self.since = datetime.strptime(raw['query']['start-date'], '%Y-%m-%d')
if 'end-date' in raw['query']:
self.until = datetime.strptime(raw['query']['end-date'], '%Y-%m-%d')
def append(self, raw, query):
self.raw.append(raw)
self.queries.append(query)
self.is_complete = not 'nextLink' in raw
casters = [column.cast for column in self.columns]
# if no rows were returned, the GA API doesn't
# include the `rows` key at all
for row in self.raw[-1].get('rows', []):
typed_row = [casters[i](row[i]) for i in range(len(self.columns))]
typed_tuple = self.Row(*typed_row)
self.rows.append(typed_tuple)
# TODO: figure out how this works with paginated queries
self.totals = raw['totalsForAllResults']
# more intuitive when querying for just a single metric
self.total = list(raw['totalsForAllResults'].values())[0]
@property
def first(self):
if len(self.rows) == 0:
return None
else:
return self.rows[0]
@property
def last(self):
if len(self.rows) == 0:
return None
else:
return self.rows[-1]
@property
def value(self):
if len(self.rows) == 0:
return None
elif len(self.rows) == 1:
return self.values[0]
else:
raise ValueError("This report contains multiple rows or metrics. Please use `rows`, `first`, `last` or a column name.")
@property
def values(self):
if len(self.metrics) == 1:
metric = self.metrics[0]
return self[metric]
else:
raise ValueError("This report contains multiple metrics. Please use `rows`, `first`, `last` or a column name.")
# TODO: it'd be nice to have metadata from `ga` available as
# properties, rather than only having them in serialized form
# (so e.g. the actual metric objects with both serialized name, slug etc.)
def __expand(self):
import ranges
# 1. generate grid
# 1a. generate date grid
since = self.since
until = self.until + relativedelta(days=1)
granularity = self.granularity.slug
dimensions = [column.slug for column in self.dimensions]
metrics = [column.slug for column in self.metrics]
time_step = INTERVAL_TIMEDELTAS[self.granularity.slug]
time_ix = report.columns.index(self.granularity)
time_interval = (since, until)
time_range = list(ranges.date.range(*time_interval, **time_step))
# 1b. generate dimension factor grid
factors = {dimension: set(report[dimension]) for dimension in set(dimensions) - {granularity}}
# 1c. assemble grid
grid = list(itertools.product(time_range, *factors.values()))
# 2. fill in the grid with available data
ndim = len(self.dimensions)
index = path(report.rows, *report.slugs[:ndim])
filled = []
for row in grid:
try:
# navigate to data (if it exists)
index_columns = row[:ndim]
data = index
for column in index_columns:
data = data[column]
filled.append(data)
except KeyError:
# TODO: for some metrics (in particular averages)
# the default value should be None, for most others
# it should be zero
row_metrics = [None] * len(report.metrics)
filler = list(row) + row_metrics
row = report.Row(*filler)
filled.append(row)
report.rows = filled
return report
def serialize(self, format=None, with_metadata=False):
names = [column.name for column in self.columns]
if not format:
return self.as_dict(with_metadata=with_metadata)
elif format == 'json':
return json.dumps(self.as_dict(with_metadata=with_metadata), indent=4)
elif format == 'csv':
buf = utils.StringIO()
writer = csv.writer(buf)
writer.writerow(names)
writer.writerows(self.rows)
return buf.getvalue()
elif format == 'ascii':
table = prettytable.PrettyTable(names)
table.align = 'l'
for row in self.rows:
table.add_row(row)
if with_metadata:
return utils.format("""
{title}
{table}
""", title=self.queries[0].title, table=table)
else:
return table
def as_dict(self, with_metadata=False):
serialized = []
for row in self.rows:
row = row._asdict()
for key, value in row.items():
row[key] = utils.date.serialize(value)
serialized.append(row)
if with_metadata:
return {
'title': self.queries[0].title,
'queries': self.queries,
'metrics': [column.name for column in self.metrics],
'dimensions': [column.name for column in self.dimensions],
'results': serialized,
}
else:
return serialized
def as_dataframe(self):
import pandas
# passing every row as a dictionary is not terribly efficient,
# but it works for now
return pandas.DataFrame(self.as_dict())
def __getitem__(self, key):
try:
if isinstance(key, Column):
key = key.slug
i = self.columns.index(key)
return [row[i] for row in self.rows]
except ValueError:
raise ValueError(key + " not in column headers")
def __iter__(self):
raise NotImplementedError()
def __len__(self):
return len(self.rows)
# TODO: would be cool if we could split up headers
# into metrics vs. dimensions so we could say
# "pageviews by day, browser"
# (also see `title` and `description` on query objects)
def __repr__(self):
metrics = ', '.join([header.name for header in self.metrics])
dimensions = ','.join([header.name for header in self.dimensions])
if len(dimensions):
return '<googleanalytics.query.Report object: {} by {}'.format(
metrics, dimensions)
else:
return '<googleanalytics.query.Report object: {}'.format(
metrics)
# maps every selection operator to its logical negation; used by `select`
# to invert a condition when building exclusion filters
EXCLUSION = {
    'eq': 'neq',
    'neq': 'eq',
    'gt': 'lte',
    'lt': 'gte',
    'gte': 'lt',
    'lte': 'gt',
    're': 'nre',
    'nre': 're',
    'contains': 'ncontains',
    'ncontains': 'contains',
}
def select(source, selection, invert=False):
    """Turn a Django-style keyword selection (``column__op: value``) into a
    list of filter expressions built from the columns in `source`.

    A key without ``__`` defaults to the ``eq`` operator. When `invert`
    is true, each operator is replaced by its negation from EXCLUSION.
    """
    selections = []
    for key, values in selection.items():
        if '__' in key:
            column, method = key.split('__')
        else:
            column = key
            method = 'eq'
        if not hasattr(Column, method):
            raise ValueError("{method} is not a valid selector. Choose from: {options}".format(
                method=method,
                options=', '.join(Column.selectors),
            ))
        if invert:
            method = EXCLUSION[method]
        column = source[column]
        selector = getattr(column, method)
        # a scalar value is treated as a one-element list
        if not isinstance(values, (list, tuple)):
            values = [values]
        # e.g. source=['cpc', 'cpm'] will return an OR
        # filter for these two sources
        for value in values:
            selections.append(selector(value))
    return selections
# TODO: refactor `raw` into `parameters`, with serialization
# (stringification) happening in one place, in `Query#build`
# and removing empty keys as necessary
# TODO: consider whether to pass everything through `Query#set`
# or otherwise avoid having two paths to modifying `raw`
class Query(object):
"""
Return a query for certain metrics and dimensions.
```python
# pageviews (metric) as a function of geographical region
profile.core.query('pageviews', 'region')
# pageviews as a function of browser
profile.core.query(['pageviews'], ['browser'])
```
The returned query can then be further refined using
all methods available on the `CoreQuery` object, such as
`limit`, `sort`, `segment` and so on.
Metrics and dimensions may be either strings (the column id or
the human-readable column name) or Metric or Dimension
objects.
Metrics and dimensions specified as a string are not case-sensitive.
```python
profile.query('PAGEVIEWS')
```
If specifying only a single metric or dimension, you can
but are not required to wrap it in a list.
"""
_lock = 0
def __init__(self, api, parameters={}, metadata={}, title=None):
self._title = title
self.raw = {
'ids': 'ga:' + api.profile.id,
'metrics': [],
'dimensions': [],
}
self.raw.update(parameters)
self.meta = {}
self.meta.update(metadata)
self.api = api
self.profile = api.profile
self.webproperty = api.profile.webproperty
self.account = api.profile.webproperty.account
self._report = None
# do not execute more than one query per second
def _wait(self):
now = time.time()
elapsed = now - self._lock
wait = max(0, 1 - elapsed)
time.sleep(wait)
self._lock = time.time()
return wait
@property
def endpoint(self):
return self.account.service.data().ga()
def clone(self):
query = self.__class__(
api=self.api,
parameters=deepcopy(self.raw),
metadata=deepcopy(self.meta),
)
return query
@utils.immutable
def set(self, key=None, value=None, **kwargs):
"""
`set` is a way to add raw properties to the request,
for features that this module does not
support or supports incompletely. For convenience's
sake, it will serialize Column objects but will
leave any other kind of value alone.
"""
serialize = partial(self.api.columns.serialize, greedy=False)
if key and value:
self.raw[key] = serialize(value)
elif key or kwargs:
properties = key or kwargs
for key, value in properties.items():
self.raw[key] = serialize(value)
else:
raise ValueError(
"Query#set requires a key and value, a properties dictionary or keyword arguments.")
return self
@utils.immutable
def columns(self, required_type=None, *values):
for column in self.api.columns.normalize(values, wrap=True):
if required_type and required_type != column.type:
raise ValueError('Tried to add {type} but received: {column}'.format(
type=required_type,
column=column,
))
self.raw[column.type + 's'].append(column.id)
return self
# TODO: maybe do something smarter, like {granularity} {metrics}
# by {dimensions} for {segment}, filtered by {filters}.
# First {limit} results from {start} to {end} /
# for {start=end}, sorted by {direction} {sort}.
@property
def description(self):
"""
A list of the metrics this query will ask for.
"""
if 'metrics' in self.raw:
metrics = self.raw['metrics']
head = metrics[0:-1] or metrics[0:1]
text = ", ".join(head)
if len(metrics) > 1:
tail = metrics[-1]
text = text + " and " + tail
else:
text = 'n/a'
return text
@property
def title(self):
return self._title or self.description
@title.setter
def title(self, value):
self._title = value
def metrics(self, *metrics):
"""
Return a new query with additional metrics.
```python
query.metrics('pageviews', 'page load time')
```
"""
return self.columns('metric', *metrics)
def dimensions(self, *dimensions):
"""
Return a new query with additional dimensions.
```python
query.dimensions('search term', 'search depth')
```
"""
return self.columns('dimension', *dimensions)
@utils.immutable
def sort(self, *columns, **options):
"""
Return a new query which will produce results sorted by
one or more metrics or dimensions. You may use plain
strings for the columns, or actual `Column`, `Metric`
and `Dimension` objects.
Add a minus in front of the metric (either the string or
the object) to sort in descending order.
```python
# sort using strings
query.sort('pageviews', '-device type')
# alternatively, ask for a descending sort in a keyword argument
query.sort('pageviews', descending=True)
# sort using metric, dimension or column objects
pageviews = profile.core.metrics['pageviews']
query.sort(-pageviews)
```
"""
sorts = self.meta.setdefault('sort', [])
for column in columns:
if isinstance(column, Column):
identifier = column.id
elif isinstance(column, utils.basestring):
descending = column.startswith('-') or options.get('descending', False)
identifier = self.api.columns[column.lstrip('-')].id
else:
raise ValueError("Can only sort on columns or column strings. Received: {}".format(column))
if descending:
sign = '-'
else:
sign = ''
sorts.append(sign + identifier)
self.raw['sort'] = ",".join(sorts)
return self
@utils.immutable
def filter(self, value=None, exclude=False, **selection):
""" Most of the actual functionality lives on the Column
object and the `all` and `any` functions. """
filters = self.meta.setdefault('filters', [])
if value and len(selection):
raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.")
elif value:
value = [value]
elif len(selection):
value = select(self.api.columns, selection, invert=exclude)
filters.append(value)
self.raw['filters'] = utils.paste(filters, ',', ';')
return self
def exclude(self, **selection):
return self.filter(exclude=True, **selection)
def build(self, copy=True):
if copy:
raw = deepcopy(self.raw)
else:
raw = self.raw
raw['metrics'] = ','.join(self.raw['metrics'])
if len(raw['dimensions']):
raw['dimensions'] = ','.join(self.raw['dimensions'])
else:
raw['dimensions'] = None
return raw
@property
def cacheable(self):
start = 'start_date' in self.raw and not utils.date.is_relative(self.raw['start_date'])
end = 'end_date' in self.raw and not utils.date.is_relative(self.raw['end_date'])
return start and end
@property
def signature(self):
query = self.build(copy=False)
standardized_query = sorted(query.items(), key=lambda t: t[0])
serialized_query = json.dumps(standardized_query)
return hashlib.sha1(serialized_query.encode('utf-8')).hexdigest()
def execute(self):
    """Run the query once and wrap the response in a `Report`.

    Serves from the API cache when possible; a TypeError from the
    client library (usually bad keyword arguments) is re-raised as
    InvalidRequestError with the submitted parameters included.
    """
    raw = self.build()
    # only queries with absolute dates are safe to cache (see `cacheable`)
    if self.api.cache and self.cacheable and self.api.cache.exists(self.signature):
        response = self.api.cache.get(raw)
    else:
        try:
            self._wait()  # throttle: at most one request per second
            response = self.endpoint.get(**raw).execute()
        except Exception as err:
            if isinstance(err, TypeError):
                # pretty-print the parameters for the error message
                width = max(map(len, self.raw.keys()))
                raw = [(key.ljust(width), value) for key, value in self.raw.items()]
                parameters = utils.paste(raw, '\t', '\n')
                diagnostics = utils.format(
                    """
                    {message}
                    The query you submitted was:
                    {parameters}
                    """, message=str(err), parameters=parameters)
                raise errors.InvalidRequestError(diagnostics)
            else:
                raise err
        # NOTE(review): cache-write placed on the fresh-fetch path; the
        # original file's indentation was lost in this copy — confirm.
        if self.api.cache and self.cacheable:
            self.api.cache.set(raw, response)
    return Report(response, self)
@property
def report(self):
    """Lazily-fetched `Report` for this query (cached after first access)."""
    if not self._report:
        self._report = self.get()
    return self._report
# lazy-loading shortcuts
def __getattr__(self, name):
    """Delegate unknown attributes to the lazily-fetched report.

    Note: touching an unknown attribute triggers query execution
    through the `report` property.
    """
    # IPython shell display should not trigger lazy-loading
    # (arguably this is an IPython issue and not our problem, but let's be pragmatic)
    if name == '_ipython_display_':
        raise AttributeError('Query objects have no custom IPython display behavior')
    elif hasattr(self.report, name):
        return getattr(self.report, name)
    else:
        raise AttributeError("'{cls}' object and its associated 'Report' object have no attribute '{name}'".format(
            cls=self.__class__.__name__,
            name=name,
        ))
def __repr__(self):
    """Readable identification: query class, title and profile name."""
    details = (self.__class__.__name__, self.title, self.profile.name)
    return "<googleanalytics.query.{} object: {} ({})>".format(*details)
class CoreQuery(Query):
"""
CoreQuery is the main way through which to produce reports
from data in Google Analytics.
The most important methods are:
* `metrics` and `dimensions` (both of which you can also pass as
lists when creating the query)
* `range` and its shortcuts that have the granularity already set:
`hourly`, `daily`, `weekly`, `monthly`, `yearly`, `total`
* `filter` to filter which rows are analyzed before running the query
* `segment` to filter down to a certain kind of session or user (as
opposed to `filter` which works on individual rows of data)
* `limit` to ask for a subset of results
* `sort` to sort the query
CoreQuery is mostly immutable: wherever possible, methods
return a new query rather than modifying the existing one,
so for example this works as you'd expect it to:
```python
base = profile.query('pageviews')
january = base.daily('2014-01-01', months=1).get()
february = base.daily('2014-02-01', months=1).get()
```
"""
# TODO (?)
# fields
# userIp / quotaUser
# https://developers.google.com/analytics/devguides/reporting/core/v3/reference#q_summary
PRECISION_LEVELS = ('FASTER', 'DEFAULT', 'HIGHER_PRECISION', )
GRANULARITY_LEVELS = ('year', 'month', 'week', 'day', 'hour', )
GRANULARITY_DIMENSIONS = (
'ga:year', 'ga:yearMonth', 'ga:yearWeek',
'ga:date', 'ga:dateHour',
)
@utils.immutable
def precision(self, precision):
    """Return a new query at the given sampling level.

    Accepts either an index into `PRECISION_LEVELS` (0-2) or one of
    the level names: 'FASTER', 'DEFAULT', 'HIGHER_PRECISION'.
    """
    if isinstance(precision, int):
        precision = self.PRECISION_LEVELS[precision]
    if precision not in self.PRECISION_LEVELS:
        levels = ", ".join(self.PRECISION_LEVELS)
        raise ValueError("Precision should be one of: " + levels)
    # the API's default needs no explicit samplingLevel parameter
    if precision != 'DEFAULT':
        self.raw['samplingLevel'] = precision
    return self
@utils.immutable
def interval(self, granularity):
    """Return a new query summarized per year/month/week/day/hour.

    Without a granularity the query yields a single result per metric
    for the whole date range; 'total' is accepted as a no-op alias.
    """
    if granularity == 'total':
        return self
    if not isinstance(granularity, int):
        try:
            granularity = self.GRANULARITY_LEVELS.index(granularity)
        except ValueError:
            levels = ", ".join(self.GRANULARITY_LEVELS)
            raise ValueError("Granularity should be one of: lifetime, " + levels)
    # the time dimension goes first so rows sort chronologically
    self.raw['dimensions'].insert(0, self.GRANULARITY_DIMENSIONS[granularity])
    return self
@utils.immutable
def range(self, start=None, stop=None, months=0, days=0):
    """Return a new query restricted to a date range.

    Either give explicit `start` and `stop` dates, or a `start` plus
    a number of `months`/`days`; with only `start`, the range ends
    today. Granularity is set separately (see `interval` and the
    `hourly`/`daily`/... shortcuts); without it a single result per
    metric covers the whole range.
    """
    begin, end = utils.date.range(start, stop, months, days)
    self.raw['start_date'] = begin
    self.raw['end_date'] = end
    return self
@inspector.implements(range)
def hourly(self, *vargs, **kwargs):
    """`range` with granularity preset to 'hour'."""
    return self.interval('hour').range(*vargs, **kwargs)

@inspector.implements(range)
def daily(self, *vargs, **kwargs):
    """`range` with granularity preset to 'day'."""
    return self.interval('day').range(*vargs, **kwargs)

@inspector.implements(range)
def weekly(self, *vargs, **kwargs):
    """`range` with granularity preset to 'week'."""
    return self.interval('week').range(*vargs, **kwargs)

@inspector.implements(range)
def monthly(self, *vargs, **kwargs):
    """`range` with granularity preset to 'month'."""
    return self.interval('month').range(*vargs, **kwargs)

@inspector.implements(range)
def yearly(self, *vargs, **kwargs):
    """`range` with granularity preset to 'year'."""
    return self.interval('year').range(*vargs, **kwargs)

@inspector.implements(range)
def total(self, *vargs, **kwargs):
    """`range` with no granularity: a single row per metric."""
    return self.range(*vargs, **kwargs)
@utils.immutable
def step(self, maximum):
    """Cap the number of results per request without capping the total
    fetched (unlike `limit`); mainly useful when debugging pagination,
    or to decide mid-fetch whether to keep going."""
    self.raw['max_results'] = maximum
    return self
@utils.immutable
def limit(self, *_range):
    """Return a new query limited to a number of results.

    Call as `limit(maximum)` or `limit(start, maximum)`; like SQL's
    LIMIT, but note that Google Analytics rows are 1-indexed.
    """
    if len(_range) == 2:
        start, maximum = _range
    else:
        start, maximum = 1, _range[0]
    self.meta['limit'] = maximum
    self.raw['start_index'] = start
    self.raw['max_results'] = maximum
    return self
@utils.immutable
def segment_sequence(self, followed_by=False, immediately_followed_by=False, first=False):
    """Placeholder for sequence-based segments; always raises.

    Sequence segments ("users who did X, later followed by Y") are
    hard to simplify into a friendly keyword interface, so this is
    intentionally unimplemented for now.
    """
    # An earlier draft chose between 'sequence' and 'condition' here,
    # but the result was never used before the raise; the dead code
    # has been removed. Behavior is unchanged.
    raise NotImplementedError()
@utils.immutable
def segment(self, value=None, scope=None, metric_scope=None, **selection):
    """
    Return a new query, limited to a segment of all users or sessions.

    Accepts segment objects, filtered segment objects and segment names:

    ```python
    query.segment(account.segments['browser'])
    query.segment('browser')
    query.segment(account.segments['browser'].any('Chrome', 'Firefox'))
    ```

    Segment can also accept a segment expression when you pass
    in a `type` argument. The type argument can be either `users`
    or `sessions`. This is pretty close to the metal.

    ```python
    # will be translated into `users::condition::perUser::ga:sessions>10`
    query.segment('condition::perUser::ga:sessions>10', type='users')
    ```

    See the [Google Analytics dynamic segments documentation][segments]

    You can also use the `any`, `all`, `followed_by` and
    `immediately_followed_by` functions in this module to
    chain together segments.

    Everything about how segments get handled is still in flux.
    Feel free to propose ideas for a nicer interface on
    the [GitHub issues page][issues]

    [segments]: https://developers.google.com/analytics/devguides/reporting/core/v3/segments#reference
    [issues]: https://github.com/debrouwere/google-analytics/issues
    """
    # Implementation notes (condensed from an earlier in-code memo):
    # segments are user- or session-scoped and are either conditions or
    # sequences; conditions may carry a metric scope (perHit/perSession/
    # perUser) which must be >= the primary scope. Multiple conditions
    # can be ANDed (;) or ORed (,). Keyword arguments arrive unordered,
    # which is why sequences cannot be expressed through **selection.
    SCOPES = {
        'hits': 'perHit',
        'sessions': 'perSession',
        'users': 'perUser',
    }
    segments = self.meta.setdefault('segments', [])
    if value and len(selection):
        raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.")
    elif value:
        # a pre-built segment object, filtered segment or raw expression
        value = [self.api.segments.serialize(value)]
    elif len(selection):
        if not scope:
            raise ValueError("Scope is required. Choose from: users, sessions.")
        if metric_scope:
            metric_scope = SCOPES[metric_scope]
        # turn keyword selections into `scope::condition[::metricScope]::expr`
        value = select(self.api.columns, selection)
        value = [[scope, 'condition', metric_scope, condition] for condition in value]
        value = ['::'.join(filter(None, condition)) for condition in value]
    segments.append(value)
    self.raw['segment'] = utils.paste(segments, ',', ';')
    return self
def users(self, **kwargs):
    """`segment` preset to user scope."""
    return self.segment(scope='users', **kwargs)

def sessions(self, **kwargs):
    """`segment` preset to session scope."""
    return self.segment(scope='sessions', **kwargs)
@utils.immutable
def next(self):
    """Return a new query advanced by one page (`start_index` bumped
    by the page size); used internally for pagination."""
    page_size = self.raw.get('max_results', 1000)
    self.raw['start_index'] = self.raw.get('start_index', 1) + page_size
    return self
def get(self):
    """
    Run the query and return a `Report`.

    This method transparently handles paginated results, so even for results that
    are larger than the maximum amount of rows the Google Analytics API will
    return in a single request, or larger than the amount of rows as specified
    through `CoreQuery#step`, `get` will leaf through all pages,
    concatenate the results and produce a single Report instance.
    """
    cursor = self
    report = None
    is_complete = False
    is_enough = False
    while not (is_enough or is_complete):
        chunk = cursor.execute()
        if report:
            # merge this page's raw payload into the running report
            report.append(chunk.raw[0], cursor)
        else:
            report = chunk
        # stop when an explicit `limit` is satisfied or the API says
        # there are no further pages
        is_enough = len(report.rows) >= self.meta.get('limit', float('inf'))
        is_complete = chunk.is_complete
        cursor = cursor.next()
    return report
class RealTimeQuery(Query):
    """
    A query against the [Google Analytics Real Time API][realtime].

    **Note:** brand new! Please test and submit any issues to GitHub.

    [realtime]: https://developers.google.com/analytics/devguides/reporting/realtime/v3/reference/data/realtime#resource
    """

    @property
    def endpoint(self):
        # Real Time API endpoint (overrides the Core Reporting endpoint)
        return self.account.service.data().realtime()

    @utils.immutable
    def limit(self, maximum):
        """
        Return a new query, limited to a certain number of results.

        Unlike core reporting queries, you cannot specify a starting
        point for live queries, just the maximum results returned.

        ```python
        # first 50
        query.limit(50)
        ```
        """
        self.meta['limit'] = maximum
        self.raw.update({
            'max_results': maximum,
        })
        return self

    def get(self):
        # real-time results are not paginated: a single request suffices
        return self.execute()
# TODO: consider moving the blueprint functionality to a separate Python package
def refine(query, description):
    """
    Refine a query from a dictionary of parameters that describes it.

    Each key names a query method (called with the given arguments) or
    a plain attribute (assigned directly). See `describe` for more
    information.
    """
    for name, arguments in description.items():
        if hasattr(query, name):
            attribute = getattr(query, name)
        else:
            raise ValueError("Unknown query method: " + name)
        # query descriptions are often automatically generated, and
        # may include empty calls, which we skip
        if utils.isempty(arguments):
            continue
        if callable(attribute):
            method = attribute
            if isinstance(arguments, dict):
                query = method(**arguments)
            elif isinstance(arguments, list):
                query = method(*arguments)
            else:
                query = method(arguments)
        else:
            # BUG FIX: this used to be `setattr(attribute, arguments)`,
            # which passes two arguments to the three-argument builtin
            # (always a TypeError) and had also lost the attribute name;
            # assign on the query object under the original name instead.
            setattr(query, name, arguments)
    return query
|
debrouwere/google-analytics | googleanalytics/query.py | refine | python | def refine(query, description):
for attribute, arguments in description.items():
if hasattr(query, attribute):
attribute = getattr(query, attribute)
else:
raise ValueError("Unknown query method: " + attribute)
# query descriptions are often automatically generated, and
# may include empty calls, which we skip
if utils.isempty(arguments):
continue
if callable(attribute):
method = attribute
if isinstance(arguments, dict):
query = method(**arguments)
elif isinstance(arguments, list):
query = method(*arguments)
else:
query = method(arguments)
else:
setattr(attribute, arguments)
return query | Refine a query from a dictionary of parameters that describes it.
See `describe` for more information. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L1107-L1135 | [
"def isempty(obj):\n if isinstance(obj, list):\n return not len(list(filter(None, obj)))\n elif isinstance(obj, dict):\n return not len(obj)\n else:\n return not obj\n"
] | # encoding: utf-8
"""
"""
import collections
import csv
from datetime import datetime
import hashlib
import json
import time
from copy import deepcopy
from functools import partial
import addressable
import inspector
import yaml
from dateutil.relativedelta import relativedelta
import prettytable
from . import errors, utils
from .columns import Column, ColumnList, Segment
# Maps a time-dimension slug to the relativedelta keyword arguments for
# one step at that granularity (used when expanding sparse reports).
INTERVAL_TIMEDELTAS = {
    'year': dict(years=1),
    'year_month': dict(months=1),
    'year_week': dict(weeks=1),
    'date': dict(days=1),
    'date_hour': dict(hours=1),
}
def path(l, *keys):
    """Index the objects in `l` into a nested dictionary keyed by the
    given attribute names; on duplicate paths the first object wins."""
    root = {}
    for item in l:
        node = root
        for attr in keys[:-1]:
            node = node.setdefault(getattr(item, attr), {})
        node.setdefault(getattr(item, keys[-1]), item)
    return root
def default(metric):
    """Default fill value for a metric: None for averages (which have
    no meaningful zero), 0 for everything else."""
    return None if 'avg' in metric else 0
class Report(object):
"""
Executing a query will return a report, which contains the requested data.
Queries are executed and turned into a report lazily, whenever data is requested.
You can also explicitly generate a report from a query by using the `Query#get` method.
```python
# will return a query object
profile.core.query.metrics('pageviews').range('yesterday')
# will return a report object
profile.core.query.metrics('pageviews').range('yesterday').get()
# will generate a report object and return its rows -- these
# two are equivalent
profile.core.query.metrics('pageviews').range('yesterday').rows
profile.core.query.metrics('pageviews').range('yesterday').get().rows
```
You can access the data in a Report object both rowwise and columnwise.
```python
report = query.metrics('pageviews', 'sessions').range('yesterday')
# first ten rows
report.rows[:10]
# work with just session data points
report['sessions'][:10]
report.rows[:10]['sessions']
```
For simple data structures, there are also some shortcuts.
These shortcuts are available both directly on Report objects
and lazily-loaded via Query objects.
```python
# reports with a single value
query = profile.core.query('pageviews').range('yesterday')
report = query.get()
assert query.value == report.value
# reports with a single metric
profile.core.query('pageviews').daily('yesterday', days=-10).values
# reports with a single result
query = profile.core.query(['pageviews', 'sessions']).range('yesterday')
assert query.first == query.last
```
"""
def __init__(self, raw, query):
    """Build a report from a raw API response and the query that made it.

    Resolves column headers into typed column objects, prepares a
    namedtuple row type, ingests the first page via `append`, and
    records the queried date range (when present) as datetimes.
    """
    self.raw = []
    self.queries = []
    all_columns = query.api.all_columns
    report_columns = [column['name'] for column in raw['columnHeaders']]
    self.columns = ColumnList([all_columns[column] for column in report_columns])
    # split into metrics vs. dimensions for convenient filtered access
    self.metrics = addressable.filter(lambda column: column.type == 'metric', self.columns)
    self.dimensions = addressable.filter(lambda column: column.type == 'dimension', self.columns)
    # the first time-based dimension (if any) defines the granularity
    time_columns = ['date_hour', 'date', 'year_week', 'year_month', 'year']
    try:
        self.granularity = next(column for column in self.dimensions if column.python_slug in time_columns)
    except StopIteration:
        self.granularity = None
    slugs = [column.python_slug for column in self.columns]
    self.Row = collections.namedtuple('Row', slugs)
    self.rows = []
    self.append(raw, query)
    self.since = self.until = None
    if 'start-date' in raw['query']:
        self.since = datetime.strptime(raw['query']['start-date'], '%Y-%m-%d')
    if 'end-date' in raw['query']:
        self.until = datetime.strptime(raw['query']['end-date'], '%Y-%m-%d')
def append(self, raw, query):
    """Ingest one page of raw API results (used during pagination)."""
    self.raw.append(raw)
    self.queries.append(query)
    # a missing `nextLink` means this was the last page
    self.is_complete = not 'nextLink' in raw
    casters = [column.cast for column in self.columns]
    # if no rows were returned, the GA API doesn't
    # include the `rows` key at all
    for row in self.raw[-1].get('rows', []):
        typed_row = [casters[i](row[i]) for i in range(len(self.columns))]
        typed_tuple = self.Row(*typed_row)
        self.rows.append(typed_tuple)
    # TODO: figure out how this works with paginated queries
    self.totals = raw['totalsForAllResults']
    # more intuitive when querying for just a single metric
    self.total = list(raw['totalsForAllResults'].values())[0]
@property
def first(self):
if len(self.rows) == 0:
return None
else:
return self.rows[0]
@property
def last(self):
if len(self.rows) == 0:
return None
else:
return self.rows[-1]
@property
def value(self):
if len(self.rows) == 0:
return None
elif len(self.rows) == 1:
return self.values[0]
else:
raise ValueError("This report contains multiple rows or metrics. Please use `rows`, `first`, `last` or a column name.")
@property
def values(self):
if len(self.metrics) == 1:
metric = self.metrics[0]
return self[metric]
else:
raise ValueError("This report contains multiple metrics. Please use `rows`, `first`, `last` or a column name.")
# TODO: it'd be nice to have metadata from `ga` available as
# properties, rather than only having them in serialized form
# (so e.g. the actual metric objects with both serialized name, slug etc.)
def __expand(self):
    """Densify the report: build the full date-by-dimension grid and
    fill rows missing from the API response with placeholder rows.

    NOTE(review): this private method looks unfinished/unreachable —
    it references a global `report` where it presumably means `self`,
    `itertools` and the local `ranges` module are not imported at the
    top of this file, and `Report` has no visible `slugs` attribute.
    Left untouched pending confirmation; do not call as-is.
    """
    import ranges
    # 1. generate grid
    # 1a. generate date grid
    since = self.since
    until = self.until + relativedelta(days=1)
    granularity = self.granularity.slug
    dimensions = [column.slug for column in self.dimensions]
    metrics = [column.slug for column in self.metrics]
    time_step = INTERVAL_TIMEDELTAS[self.granularity.slug]
    time_ix = report.columns.index(self.granularity)
    time_interval = (since, until)
    time_range = list(ranges.date.range(*time_interval, **time_step))
    # 1b. generate dimension factor grid
    factors = {dimension: set(report[dimension]) for dimension in set(dimensions) - {granularity}}
    # 1c. assemble grid
    grid = list(itertools.product(time_range, *factors.values()))
    # 2. fill in the grid with available data
    ndim = len(self.dimensions)
    index = path(report.rows, *report.slugs[:ndim])
    filled = []
    for row in grid:
        try:
            # navigate to data (if it exists)
            index_columns = row[:ndim]
            data = index
            for column in index_columns:
                data = data[column]
            filled.append(data)
        except KeyError:
            # TODO: for some metrics (in particular averages)
            # the default value should be None, for most others
            # it should be zero
            row_metrics = [None] * len(report.metrics)
            filler = list(row) + row_metrics
            row = report.Row(*filler)
            filled.append(row)
    report.rows = filled
    return report
def serialize(self, format=None, with_metadata=False):
    """Export the report: as a list of dicts (default), or as 'json',
    'csv' or an 'ascii' table.

    NOTE(review): an unknown `format` falls through and returns None
    silently — confirm whether that is intended.
    """
    # column display names; only used by the csv and ascii formats
    names = [column.name for column in self.columns]
    if not format:
        return self.as_dict(with_metadata=with_metadata)
    elif format == 'json':
        return json.dumps(self.as_dict(with_metadata=with_metadata), indent=4)
    elif format == 'csv':
        buf = utils.StringIO()
        writer = csv.writer(buf)
        writer.writerow(names)
        writer.writerows(self.rows)
        return buf.getvalue()
    elif format == 'ascii':
        table = prettytable.PrettyTable(names)
        table.align = 'l'
        for row in self.rows:
            table.add_row(row)
        if with_metadata:
            return utils.format("""
            {title}
            {table}
            """, title=self.queries[0].title, table=table)
        else:
            return table
def as_dict(self, with_metadata=False):
serialized = []
for row in self.rows:
row = row._asdict()
for key, value in row.items():
row[key] = utils.date.serialize(value)
serialized.append(row)
if with_metadata:
return {
'title': self.queries[0].title,
'queries': self.queries,
'metrics': [column.name for column in self.metrics],
'dimensions': [column.name for column in self.dimensions],
'results': serialized,
}
else:
return serialized
def as_dataframe(self):
import pandas
# passing every row as a dictionary is not terribly efficient,
# but it works for now
return pandas.DataFrame(self.as_dict())
def __getitem__(self, key):
    """Columnwise access: `report['pageviews']` returns that column's
    values across all rows. Accepts Column objects or column slugs."""
    try:
        if isinstance(key, Column):
            key = key.slug
        i = self.columns.index(key)
        return [row[i] for row in self.rows]
    except ValueError:
        raise ValueError(key + " not in column headers")
def __iter__(self):
raise NotImplementedError()
def __len__(self):
return len(self.rows)
# TODO: would be cool if we could split up headers
# into metrics vs. dimensions so we could say
# "pageviews by day, browser"
# (also see `title` and `description` on query objects)
def __repr__(self):
metrics = ', '.join([header.name for header in self.metrics])
dimensions = ','.join([header.name for header in self.dimensions])
if len(dimensions):
return '<googleanalytics.query.Report object: {} by {}'.format(
metrics, dimensions)
else:
return '<googleanalytics.query.Report object: {}'.format(
metrics)
# Maps each filter selector to its logical negation; `select` uses this
# to invert a keyword selection when excluding rows (see Query.exclude).
EXCLUSION = {
    'eq': 'neq',
    'neq': 'eq',
    'gt': 'lte',
    'lt': 'gte',
    'gte': 'lt',
    'lte': 'gt',
    're': 'nre',
    'nre': 're',
    'contains': 'ncontains',
    'ncontains': 'contains',
}
def select(source, selection, invert=False):
    """Translate keyword selections like `browser__eq='Chrome'` into
    column filter expressions.

    `source` is a column lookup; `invert=True` negates each operator
    via EXCLUSION. A list/tuple of values for one key yields one
    expression per value (combined as OR by the caller).
    """
    selections = []
    for key, values in selection.items():
        # split `column__method`; a bare key defaults to equality
        if '__' in key:
            column, method = key.split('__')
        else:
            column = key
            method = 'eq'
        if not hasattr(Column, method):
            raise ValueError("{method} is not a valid selector. Choose from: {options}".format(
                method=method,
                options=', '.join(Column.selectors),
            ))
        if invert:
            method = EXCLUSION[method]
        column = source[column]
        selector = getattr(column, method)
        if not isinstance(values, (list, tuple)):
            values = [values]
        # e.g. source=['cpc', 'cpm'] will return an OR
        # filter for these two sources
        for value in values:
            selections.append(selector(value))
    return selections
# TODO: refactor `raw` into `parameters`, with serialization
# (stringification) happening in one place, in `Query#build`
# and removing empty keys as necessary
# TODO: consider whether to pass everything through `Query#set`
# or otherwise avoid having two paths to modifying `raw`
class Query(object):
"""
Return a query for certain metrics and dimensions.
```python
# pageviews (metric) as a function of geographical region
profile.core.query('pageviews', 'region')
# pageviews as a function of browser
profile.core.query(['pageviews'], ['browser'])
```
The returned query can then be further refined using
all methods available on the `CoreQuery` object, such as
`limit`, `sort`, `segment` and so on.
Metrics and dimensions may be either strings (the column id or
the human-readable column name) or Metric or Dimension
objects.
Metrics and dimensions specified as a string are not case-sensitive.
```python
profile.query('PAGEVIEWS')
```
If specifying only a single metric or dimension, you can
but are not required to wrap it in a list.
"""
_lock = 0
def __init__(self, api, parameters=None, metadata=None, title=None):
    """Create a query bound to an API/profile.

    `parameters` seeds the raw request dictionary, `metadata` seeds
    the bookkeeping dictionary, and `title` overrides the generated
    description.
    """
    # BUG FIX (latent): `parameters={}` / `metadata={}` were mutable
    # default arguments shared between calls; use None sentinels.
    self._title = title
    self.raw = {
        'ids': 'ga:' + api.profile.id,
        'metrics': [],
        'dimensions': [],
    }
    self.raw.update(parameters or {})
    self.meta = {}
    self.meta.update(metadata or {})
    # convenient handles up the ownership chain
    self.api = api
    self.profile = api.profile
    self.webproperty = api.profile.webproperty
    self.account = api.profile.webproperty.account
    self._report = None
# do not execute more than one query per second
def _wait(self):
    """Sleep just long enough that a full second has passed since this
    query object's previous request; returns the seconds slept.

    NOTE(review): `_lock` is read from the class attribute (0) on first
    use but written as an instance attribute, so throttling is
    per-query-object rather than global — confirm this is intended.
    """
    now = time.time()
    elapsed = now - self._lock
    wait = max(0, 1 - elapsed)
    time.sleep(wait)
    self._lock = time.time()
    return wait
@property
def endpoint(self):
    """Core Reporting API endpoint for this account's service."""
    return self.account.service.data().ga()

def clone(self):
    """Deep-copied twin of this query (backs `utils.immutable`)."""
    return self.__class__(
        api=self.api,
        parameters=deepcopy(self.raw),
        metadata=deepcopy(self.meta),
    )
@utils.immutable
def set(self, key=None, value=None, **kwargs):
    """
    `set` is a way to add raw properties to the request,
    for features that this module does not
    support or supports incompletely. For convenience's
    sake, it will serialize Column objects but will
    leave any other kind of value alone.
    """
    serialize = partial(self.api.columns.serialize, greedy=False)
    if key and value:
        self.raw[key] = serialize(value)
    elif key or kwargs:
        # either a dictionary passed as the sole positional argument,
        # or plain keyword arguments
        properties = key or kwargs
        for key, value in properties.items():
            self.raw[key] = serialize(value)
    else:
        raise ValueError(
            "Query#set requires a key and value, a properties dictionary or keyword arguments.")
    return self
@utils.immutable
def columns(self, required_type=None, *values):
    """Append metrics/dimensions to the query; when `required_type`
    is given, reject any column of a different type."""
    for col in self.api.columns.normalize(values, wrap=True):
        if required_type and required_type != col.type:
            raise ValueError('Tried to add {type} but received: {column}'.format(
                type=required_type,
                column=col,
            ))
        # columns land in raw['metrics'] or raw['dimensions']
        self.raw[col.type + 's'].append(col.id)
    return self
# TODO: maybe do something smarter, like {granularity} {metrics}
# by {dimensions} for {segment}, filtered by {filters}.
# First {limit} results from {start} to {end} /
# for {start=end}, sorted by {direction} {sort}.
@property
def description(self):
    """Humanized list of this query's metrics, e.g. "a, b and c"
    ('n/a' when no metrics key is present)."""
    if 'metrics' not in self.raw:
        return 'n/a'
    metrics = self.raw['metrics']
    head = metrics[0:-1] or metrics[0:1]
    text = ", ".join(head)
    if len(metrics) > 1:
        text = text + " and " + metrics[-1]
    return text

@property
def title(self):
    """Explicit title when set, otherwise the generated description."""
    return self._title or self.description

@title.setter
def title(self, value):
    self._title = value
def metrics(self, *metrics):
    """Return a new query with the given metrics added, e.g.
    `query.metrics('pageviews', 'page load time')`."""
    return self.columns('metric', *metrics)

def dimensions(self, *dimensions):
    """Return a new query with the given dimensions added, e.g.
    `query.dimensions('search term', 'search depth')`."""
    return self.columns('dimension', *dimensions)
@utils.immutable
def sort(self, *columns, **options):
    """
    Return a new query which will produce results sorted by
    one or more metrics or dimensions. You may use plain
    strings for the columns, or actual `Column`, `Metric`
    and `Dimension` objects.

    Prefix a string with a minus (or pass `descending=True`) to
    sort in descending order:

    ```python
    query.sort('pageviews', '-device type')
    query.sort('pageviews', descending=True)
    pageviews = profile.core.metrics['pageviews']
    query.sort(pageviews)
    ```
    """
    sorts = self.meta.setdefault('sort', [])
    for column in columns:
        # BUG FIX: `descending` was only assigned inside the string
        # branch, so sorting by a Column object raised
        # UnboundLocalError (or silently reused the previous
        # iteration's value); initialize it per column instead.
        descending = options.get('descending', False)
        if isinstance(column, Column):
            identifier = column.id
        elif isinstance(column, utils.basestring):
            descending = column.startswith('-') or descending
            identifier = self.api.columns[column.lstrip('-')].id
        else:
            raise ValueError("Can only sort on columns or column strings. Received: {}".format(column))
        sorts.append('-' + identifier if descending else identifier)
    self.raw['sort'] = ",".join(sorts)
    return self
@utils.immutable
def filter(self, value=None, exclude=False, **selection):
    """Return a new query filtered down to matching rows.

    Most of the actual functionality lives on the Column
    object and the `all` and `any` functions.
    """
    existing = self.meta.setdefault('filters', [])
    if value and selection:
        raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.")
    if selection and not value:
        clause = select(self.api.columns, selection, invert=exclude)
    elif value:
        clause = [value]
    else:
        clause = value
    existing.append(clause)
    self.raw['filters'] = utils.paste(existing, ',', ';')
    return self
def exclude(self, **selection):
    """Shorthand for `filter` with exclusion turned on."""
    return self.filter(exclude=True, **selection)

def build(self, copy=True):
    """Serialize the raw parameters into the form the API expects:
    comma-joined metrics, comma-joined dimensions (or None)."""
    raw = deepcopy(self.raw) if copy else self.raw
    raw['metrics'] = ','.join(self.raw['metrics'])
    dimensions = self.raw['dimensions']
    raw['dimensions'] = ','.join(dimensions) if dimensions else None
    return raw
@property
def cacheable(self):
    """True only when both ends of the date range are absolute dates;
    relative dates (e.g. 'yesterday') would make cached results go stale."""
    checks = []
    for key in ('start_date', 'end_date'):
        checks.append(key in self.raw and not utils.date.is_relative(self.raw[key]))
    return all(checks)

@property
def signature(self):
    """SHA-1 digest of the canonicalized query, used as the cache key."""
    parameters = self.build(copy=False)
    canonical = json.dumps(sorted(parameters.items(), key=lambda pair: pair[0]))
    return hashlib.sha1(canonical.encode('utf-8')).hexdigest()
def execute(self):
    """Run the query once and wrap the response in a `Report`.

    Serves from the API cache when possible; a TypeError from the
    client library (usually bad keyword arguments) is re-raised as
    InvalidRequestError with the submitted parameters included.
    """
    raw = self.build()
    # only queries with absolute dates are safe to cache (see `cacheable`)
    if self.api.cache and self.cacheable and self.api.cache.exists(self.signature):
        response = self.api.cache.get(raw)
    else:
        try:
            self._wait()  # throttle: at most one request per second
            response = self.endpoint.get(**raw).execute()
        except Exception as err:
            if isinstance(err, TypeError):
                # pretty-print the parameters for the error message
                width = max(map(len, self.raw.keys()))
                raw = [(key.ljust(width), value) for key, value in self.raw.items()]
                parameters = utils.paste(raw, '\t', '\n')
                diagnostics = utils.format(
                    """
                    {message}
                    The query you submitted was:
                    {parameters}
                    """, message=str(err), parameters=parameters)
                raise errors.InvalidRequestError(diagnostics)
            else:
                raise err
        # NOTE(review): cache-write placed on the fresh-fetch path; the
        # original file's indentation was lost in this copy — confirm.
        if self.api.cache and self.cacheable:
            self.api.cache.set(raw, response)
    return Report(response, self)
@property
def report(self):
    """Lazily-fetched `Report` for this query (cached after first access)."""
    if not self._report:
        self._report = self.get()
    return self._report
# lazy-loading shortcuts
def __getattr__(self, name):
    """Delegate unknown attributes to the lazily-fetched report.

    Note: touching an unknown attribute triggers query execution
    through the `report` property.
    """
    # IPython shell display should not trigger lazy-loading
    # (arguably this is an IPython issue and not our problem, but let's be pragmatic)
    if name == '_ipython_display_':
        raise AttributeError('Query objects have no custom IPython display behavior')
    elif hasattr(self.report, name):
        return getattr(self.report, name)
    else:
        raise AttributeError("'{cls}' object and its associated 'Report' object have no attribute '{name}'".format(
            cls=self.__class__.__name__,
            name=name,
        ))

def __repr__(self):
    # identifies the concrete query class, its title and the profile
    return "<googleanalytics.query.{} object: {} ({})>".format(self.__class__.__name__, self.title, self.profile.name)
class CoreQuery(Query):
    """
    CoreQuery is the main way through which to produce reports
    from data in Google Analytics.

    The most important methods are:

    * `metrics` and `dimensions` (both of which you can also pass as
      lists when creating the query)
    * `range` and its shortcuts that have the granularity already set:
      `hourly`, `daily`, `weekly`, `monthly`, `yearly`, `total`
    * `filter` to filter which rows are analyzed before running the query
    * `segment` to filter down to a certain kind of session or user (as
      opposed to `filter` which works on individual rows of data)
    * `limit` to ask for a subset of results
    * `sort` to sort the query

    CoreQuery is mostly immutable: wherever possible, methods
    return a new query rather than modifying the existing one,
    so for example this works as you'd expect it to:

    ```python
    base = profile.query('pageviews')
    january = base.daily('2014-01-01', months=1).get()
    february = base.daily('2014-02-01', months=1).get()
    ```
    """

    # TODO (?)
    # fields
    # userIp / quotaUser
    # https://developers.google.com/analytics/devguides/reporting/core/v3/reference#q_summary

    # sampling levels accepted by the API; indexes 0-2 double as
    # integer shorthands in `precision`
    PRECISION_LEVELS = ('FASTER', 'DEFAULT', 'HIGHER_PRECISION', )
    # granularities from coarsest to finest; indexes line up with
    # the matching time dimension in GRANULARITY_DIMENSIONS
    GRANULARITY_LEVELS = ('year', 'month', 'week', 'day', 'hour', )
    GRANULARITY_DIMENSIONS = (
        'ga:year', 'ga:yearMonth', 'ga:yearWeek',
        'ga:date', 'ga:dateHour',
    )

    @utils.immutable
    def precision(self, precision):
        """
        For queries that should run faster, you may specify a lower precision,
        and for those that need to be more precise, a higher precision:

        ```python
        # faster queries
        query.range('2014-01-01', '2014-01-31', precision=0)
        query.range('2014-01-01', '2014-01-31', precision='FASTER')

        # queries with the default level of precision (usually what you want)
        query.range('2014-01-01', '2014-01-31')
        query.range('2014-01-01', '2014-01-31', precision=1)
        query.range('2014-01-01', '2014-01-31', precision='DEFAULT')

        # queries that are more precise
        query.range('2014-01-01', '2014-01-31', precision=2)
        query.range('2014-01-01', '2014-01-31', precision='HIGHER_PRECISION')
        ```
        """
        # integers are shorthand indexes into PRECISION_LEVELS
        if isinstance(precision, int):
            precision = self.PRECISION_LEVELS[precision]

        if precision not in self.PRECISION_LEVELS:
            levels = ", ".join(self.PRECISION_LEVELS)
            raise ValueError("Precision should be one of: " + levels)

        # DEFAULT is the API's own default, so it is not sent explicitly
        if precision != 'DEFAULT':
            self.raw.update({'samplingLevel': precision})

        return self

    @utils.immutable
    def interval(self, granularity):
        """
        Note that if you don't specify a granularity (either through the `interval`
        method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
        shortcut methods) you will get only a single result, encompassing the
        entire date range, per metric.
        """
        # 'total' means no time dimension at all: one row per metric
        if granularity == 'total':
            return self

        if not isinstance(granularity, int):
            if granularity in self.GRANULARITY_LEVELS:
                granularity = self.GRANULARITY_LEVELS.index(granularity)
            else:
                levels = ", ".join(self.GRANULARITY_LEVELS)
                # NOTE(review): the message advertises 'lifetime' but the
                # accepted no-op token above is 'total' -- confirm intent
                raise ValueError("Granularity should be one of: lifetime, " + levels)

        # prepend the matching time dimension to the query's dimensions
        dimension = self.GRANULARITY_DIMENSIONS[granularity]
        self.raw['dimensions'].insert(0, dimension)

        return self

    @utils.immutable
    def range(self, start=None, stop=None, months=0, days=0):
        """
        Return a new query that fetches metrics within a certain date range.

        ```python
        query.range('2014-01-01', '2014-06-30')
        ```

        If you don't specify a `stop` argument, the date range will end today. If instead
        you meant to fetch just a single day's results, try:

        ```python
        query.range('2014-01-01', days=1)
        ```

        More generally, you can specify that you'd like a certain number of days,
        starting from a certain date:

        ```python
        query.range('2014-01-01', months=3)
        query.range('2014-01-01', days=28)
        ```

        Note that if you don't specify a granularity (either through the `interval`
        method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
        shortcut methods) you will get only a single result, encompassing the
        entire date range, per metric.

        **Note:** it is currently not possible to easily specify that you'd like
        to query the last full week(s), month(s) et cetera.
        This will be added sometime in the future.
        """
        start, stop = utils.date.range(start, stop, months, days)

        self.raw.update({
            'start_date': start,
            'end_date': stop,
        })

        return self

    @inspector.implements(range)
    def hourly(self, *vargs, **kwargs):
        # same as `range`, with the granularity preset to hours
        return self.interval('hour').range(*vargs, **kwargs)

    @inspector.implements(range)
    def daily(self, *vargs, **kwargs):
        """
        Return a new query that fetches metrics within a certain date
        range, summarized by day. This method is identical to
        `CoreQuery#range` but it sets the default granularity to
        `granularity='day'`.
        """
        return self.interval('day').range(*vargs, **kwargs)

    @inspector.implements(range)
    def weekly(self, *vargs, **kwargs):
        """
        Return a new query that fetches metrics within a certain date
        range, summarized by week. This method is identical to
        `CoreQuery#range` but it sets the default granularity to
        `granularity='week'`.
        """
        return self.interval('week').range(*vargs, **kwargs)

    @inspector.implements(range)
    def monthly(self, *vargs, **kwargs):
        """
        Return a new query that fetches metrics within a certain date
        range, summarized by month. This method is identical to
        `CoreQuery#range` but it sets the default granularity to
        `granularity='month'`.
        """
        return self.interval('month').range(*vargs, **kwargs)

    @inspector.implements(range)
    def yearly(self, *vargs, **kwargs):
        """
        Return a new query that fetches metrics within a certain date
        range, summarized by year. This method is identical to
        `CoreQuery#range` but it sets the default granularity to
        `granularity='year'`.
        """
        return self.interval('year').range(*vargs, **kwargs)

    @inspector.implements(range)
    def total(self, *vargs, **kwargs):
        # `range` without a time dimension: one total per metric
        return self.range(*vargs, **kwargs)

    @utils.immutable
    def step(self, maximum):
        """
        Return a new query with a maximum amount of results to be returned
        in any one request, without implying that we should stop
        fetching beyond that limit (unlike `CoreQuery#limit`.)

        Useful in debugging pagination functionality.

        Perhaps also useful when you want to be able to decide whether to
        continue fetching data, based on the data you've already received.
        """
        self.raw['max_results'] = maximum
        return self

    @utils.immutable
    def limit(self, *_range):
        """
        Return a new query, limited to a certain number of results.

        ```python
        # first 100
        query.limit(100)

        # 50 to 60
        query.limit(50, 10)
        ```

        Please note carefully that Google Analytics uses
        1-indexing on its rows.
        """
        # uses the same argument order as
        # LIMIT in a SQL database
        if len(_range) == 2:
            start, maximum = _range
        else:
            start = 1
            maximum = _range[0]

        self.meta['limit'] = maximum
        self.raw.update({
            'start_index': start,
            'max_results': maximum,
        })
        return self

    @utils.immutable
    def segment_sequence(self, followed_by=False, immediately_followed_by=False, first=False):
        # placeholder for sequence-based segments; intentionally unfinished
        # sequences are just really hard to "simplify" because so much is possible
        if followed_by or immediately_followed_by:
            method = 'sequence'
        else:
            method = 'condition'
        raise NotImplementedError()

    @utils.immutable
    def segment(self, value=None, scope=None, metric_scope=None, **selection):
        """
        Return a new query, limited to a segment of all users or sessions.

        Accepts segment objects, filtered segment objects and segment names:

        ```python
        query.segment(account.segments['browser'])
        query.segment('browser')
        query.segment(account.segments['browser'].any('Chrome', 'Firefox'))
        ```

        Segment can also accept a segment expression when you pass
        in a `type` argument. The type argument can be either `users`
        or `sessions`. This is pretty close to the metal.

        ```python
        # will be translated into `users::condition::perUser::ga:sessions>10`
        query.segment('condition::perUser::ga:sessions>10', type='users')
        ```

        See the [Google Analytics dynamic segments documentation][segments]

        You can also use the `any`, `all`, `followed_by` and
        `immediately_followed_by` functions in this module to
        chain together segments.

        Everything about how segments get handled is still in flux.
        Feel free to propose ideas for a nicer interface on
        the [GitHub issues page][issues]

        [segments]: https://developers.google.com/analytics/devguides/reporting/core/v3/segments#reference
        [issues]: https://github.com/debrouwere/google-analytics/issues
        """
        """
        Technical note to self about segments:

        * users or sessions
        * sequence or condition
        * scope (perHit, perSession, perUser -- gte primary scope)

        Multiple conditions can be ANDed or ORed together; these two are equivalent

            users::condition::ga:revenue>10;ga:sessionDuration>60
            users::condition::ga:revenue>10;users::condition::ga:sessionDuration>60

        For sequences, prepending ^ means the first part of the sequence has to match
        the first session/hit/...

        * users and sessions conditions can be combined (but only with AND)
        * sequences and conditions can also be combined (but only with AND)

            sessions::sequence::ga:browser==Chrome;
                condition::perHit::ga:timeOnPage>5
                ->>
                ga:deviceCategory==mobile;ga:revenue>10;
            users::sequence::ga:deviceCategory==desktop
                ->>
                ga:deviceCategory=mobile;
            ga:revenue>100;
            condition::ga:browser==Chrome

        Problem: keyword arguments are passed as a dictionary, not an ordered dictionary!
        So e.g. this is risky

            query.sessions(time_on_page__gt=5, device_category='mobile', followed_by=True)
        """

        # maps the keyword-friendly scope names onto the API's spelling
        SCOPES = {
            'hits': 'perHit',
            'sessions': 'perSession',
            'users': 'perUser',
        }

        segments = self.meta.setdefault('segments', [])

        if value and len(selection):
            raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.")
        elif value:
            value = [self.api.segments.serialize(value)]
        elif len(selection):
            if not scope:
                raise ValueError("Scope is required. Choose from: users, sessions.")
            if metric_scope:
                metric_scope = SCOPES[metric_scope]
            # build one `scope::condition::metric_scope::expression` string
            # per selected condition, dropping any empty parts
            value = select(self.api.columns, selection)
            value = [[scope, 'condition', metric_scope, condition] for condition in value]
            value = ['::'.join(filter(None, condition)) for condition in value]

        segments.append(value)
        self.raw['segment'] = utils.paste(segments, ',', ';')

        return self

    def users(self, **kwargs):
        """Shortcut for `segment` with a user-level scope."""
        return self.segment(scope='users', **kwargs)

    def sessions(self, **kwargs):
        """Shortcut for `segment` with a session-level scope."""
        return self.segment(scope='sessions', **kwargs)

    @utils.immutable
    def next(self):
        """
        Return a new query with a modified `start_index`.
        Mainly used internally to paginate through results.
        """
        step = self.raw.get('max_results', 1000)
        start = self.raw.get('start_index', 1) + step
        self.raw['start_index'] = start
        return self

    def get(self):
        """
        Run the query and return a `Report`.

        This method transparently handles paginated results, so even for results that
        are larger than the maximum amount of rows the Google Analytics API will
        return in a single request, or larger than the amount of rows as specified
        through `CoreQuery#step`, `get` will leaf through all pages,
        concatenate the results and produce a single Report instance.
        """
        cursor = self
        report = None
        is_complete = False
        is_enough = False

        while not (is_enough or is_complete):
            chunk = cursor.execute()

            # first page becomes the report; later pages are appended to it
            if report:
                report.append(chunk.raw[0], cursor)
            else:
                report = chunk

            # stop once an explicit `limit` is satisfied or the API has no more pages
            is_enough = len(report.rows) >= self.meta.get('limit', float('inf'))
            is_complete = chunk.is_complete
            cursor = cursor.next()

        return report
class RealTimeQuery(Query):
    """
    Query interface to the [Google Analytics Real Time API][realtime].

    **Note:** brand new! Please test and submit any issues to GitHub.

    [realtime]: https://developers.google.com/analytics/devguides/reporting/realtime/v3/reference/data/realtime#resource
    """

    @property
    def endpoint(self):
        """The service endpoint that serves live (real-time) data."""
        return self.account.service.data().realtime()

    @utils.immutable
    def limit(self, maximum):
        """
        Return a new query capped at a maximum number of results.

        Unlike core reporting queries, real-time queries accept no
        starting index -- only the maximum amount of rows returned.

        ```python
        # first 50
        query.limit(50)
        ```
        """
        self.meta['limit'] = maximum
        self.raw['max_results'] = maximum
        return self

    def get(self):
        """Run the query; live results come back in a single request."""
        return self.execute()
# TODO: consider moving the blueprint functionality to a separate Python package
def describe(profile, description):
    """
    Build a query from a declarative description: a mapping of query
    actions (method names) to their parameters, applied in turn.

    This is an alternative to the chaining interface, handy when
    queries live in a data file rather than in Python code.
    """
    # 'type' selects which API to query ('core' by default) and is
    # consumed here, leaving only genuine query actions behind
    kind = description.pop('type', 'core')
    endpoint = getattr(profile, kind)
    return refine(endpoint.query, description)
|
debrouwere/google-analytics | googleanalytics/query.py | Query.set | python | def set(self, key=None, value=None, **kwargs):
serialize = partial(self.api.columns.serialize, greedy=False)
if key and value:
self.raw[key] = serialize(value)
elif key or kwargs:
properties = key or kwargs
for key, value in properties.items():
self.raw[key] = serialize(value)
else:
raise ValueError(
"Query#set requires a key and value, a properties dictionary or keyword arguments.")
return self | `set` is a way to add raw properties to the request,
for features that this module does not
support or supports incompletely. For convenience's
sake, it will serialize Column objects but will
leave any other kind of value alone. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L430-L451 | null | class Query(object):
"""
Return a query for certain metrics and dimensions.
```python
# pageviews (metric) as a function of geographical region
profile.core.query('pageviews', 'region')
# pageviews as a function of browser
profile.core.query(['pageviews'], ['browser'])
```
The returned query can then be further refined using
all methods available on the `CoreQuery` object, such as
`limit`, `sort`, `segment` and so on.
Metrics and dimensions may be either strings (the column id or
the human-readable column name) or Metric or Dimension
objects.
Metrics and dimensions specified as a string are not case-sensitive.
```python
profile.query('PAGEVIEWS')
```
If specifying only a single metric or dimension, you can
but are not required to wrap it in a list.
"""
_lock = 0
def __init__(self, api, parameters={}, metadata={}, title=None):
self._title = title
self.raw = {
'ids': 'ga:' + api.profile.id,
'metrics': [],
'dimensions': [],
}
self.raw.update(parameters)
self.meta = {}
self.meta.update(metadata)
self.api = api
self.profile = api.profile
self.webproperty = api.profile.webproperty
self.account = api.profile.webproperty.account
self._report = None
# do not execute more than one query per second
def _wait(self):
now = time.time()
elapsed = now - self._lock
wait = max(0, 1 - elapsed)
time.sleep(wait)
self._lock = time.time()
return wait
@property
def endpoint(self):
return self.account.service.data().ga()
def clone(self):
query = self.__class__(
api=self.api,
parameters=deepcopy(self.raw),
metadata=deepcopy(self.meta),
)
return query
@utils.immutable
@utils.immutable
def columns(self, required_type=None, *values):
for column in self.api.columns.normalize(values, wrap=True):
if required_type and required_type != column.type:
raise ValueError('Tried to add {type} but received: {column}'.format(
type=required_type,
column=column,
))
self.raw[column.type + 's'].append(column.id)
return self
# TODO: maybe do something smarter, like {granularity} {metrics}
# by {dimensions} for {segment}, filtered by {filters}.
# First {limit} results from {start} to {end} /
# for {start=end}, sorted by {direction} {sort}.
@property
def description(self):
"""
A list of the metrics this query will ask for.
"""
if 'metrics' in self.raw:
metrics = self.raw['metrics']
head = metrics[0:-1] or metrics[0:1]
text = ", ".join(head)
if len(metrics) > 1:
tail = metrics[-1]
text = text + " and " + tail
else:
text = 'n/a'
return text
@property
def title(self):
return self._title or self.description
@title.setter
def title(self, value):
self._title = value
def metrics(self, *metrics):
"""
Return a new query with additional metrics.
```python
query.metrics('pageviews', 'page load time')
```
"""
return self.columns('metric', *metrics)
def dimensions(self, *dimensions):
"""
Return a new query with additional dimensions.
```python
query.dimensions('search term', 'search depth')
```
"""
return self.columns('dimension', *dimensions)
@utils.immutable
def sort(self, *columns, **options):
"""
Return a new query which will produce results sorted by
one or more metrics or dimensions. You may use plain
strings for the columns, or actual `Column`, `Metric`
and `Dimension` objects.
Add a minus in front of the metric (either the string or
the object) to sort in descending order.
```python
# sort using strings
query.sort('pageviews', '-device type')
# alternatively, ask for a descending sort in a keyword argument
query.sort('pageviews', descending=True)
# sort using metric, dimension or column objects
pageviews = profile.core.metrics['pageviews']
query.sort(-pageviews)
```
"""
sorts = self.meta.setdefault('sort', [])
for column in columns:
if isinstance(column, Column):
identifier = column.id
elif isinstance(column, utils.basestring):
descending = column.startswith('-') or options.get('descending', False)
identifier = self.api.columns[column.lstrip('-')].id
else:
raise ValueError("Can only sort on columns or column strings. Received: {}".format(column))
if descending:
sign = '-'
else:
sign = ''
sorts.append(sign + identifier)
self.raw['sort'] = ",".join(sorts)
return self
@utils.immutable
def filter(self, value=None, exclude=False, **selection):
""" Most of the actual functionality lives on the Column
object and the `all` and `any` functions. """
filters = self.meta.setdefault('filters', [])
if value and len(selection):
raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.")
elif value:
value = [value]
elif len(selection):
value = select(self.api.columns, selection, invert=exclude)
filters.append(value)
self.raw['filters'] = utils.paste(filters, ',', ';')
return self
def exclude(self, **selection):
return self.filter(exclude=True, **selection)
def build(self, copy=True):
if copy:
raw = deepcopy(self.raw)
else:
raw = self.raw
raw['metrics'] = ','.join(self.raw['metrics'])
if len(raw['dimensions']):
raw['dimensions'] = ','.join(self.raw['dimensions'])
else:
raw['dimensions'] = None
return raw
@property
def cacheable(self):
start = 'start_date' in self.raw and not utils.date.is_relative(self.raw['start_date'])
end = 'end_date' in self.raw and not utils.date.is_relative(self.raw['end_date'])
return start and end
@property
def signature(self):
query = self.build(copy=False)
standardized_query = sorted(query.items(), key=lambda t: t[0])
serialized_query = json.dumps(standardized_query)
return hashlib.sha1(serialized_query.encode('utf-8')).hexdigest()
def execute(self):
raw = self.build()
if self.api.cache and self.cacheable and self.api.cache.exists(self.signature):
response = self.api.cache.get(raw)
else:
try:
self._wait()
response = self.endpoint.get(**raw).execute()
except Exception as err:
if isinstance(err, TypeError):
width = max(map(len, self.raw.keys()))
raw = [(key.ljust(width), value) for key, value in self.raw.items()]
parameters = utils.paste(raw, '\t', '\n')
diagnostics = utils.format(
"""
{message}
The query you submitted was:
{parameters}
""", message=str(err), parameters=parameters)
raise errors.InvalidRequestError(diagnostics)
else:
raise err
if self.api.cache and self.cacheable:
self.api.cache.set(raw, response)
return Report(response, self)
@property
def report(self):
if not self._report:
self._report = self.get()
return self._report
# lazy-loading shortcuts
def __getattr__(self, name):
# IPython shell display should not trigger lazy-loading
# (arguably this is an IPython issue and not our problem, but let's be pragmatic)
if name == '_ipython_display_':
raise AttributeError('Query objects have no custom IPython display behavior')
elif hasattr(self.report, name):
return getattr(self.report, name)
else:
raise AttributeError("'{cls}' object and its associated 'Report' object have no attribute '{name}'".format(
cls=self.__class__.__name__,
name=name,
))
def __repr__(self):
return "<googleanalytics.query.{} object: {} ({})>".format(self.__class__.__name__, self.title, self.profile.name)
|
debrouwere/google-analytics | googleanalytics/query.py | Query.description | python | def description(self):
if 'metrics' in self.raw:
metrics = self.raw['metrics']
head = metrics[0:-1] or metrics[0:1]
text = ", ".join(head)
if len(metrics) > 1:
tail = metrics[-1]
text = text + " and " + tail
else:
text = 'n/a'
return text | A list of the metrics this query will ask for. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L469-L484 | null | class Query(object):
"""
Return a query for certain metrics and dimensions.
```python
# pageviews (metric) as a function of geographical region
profile.core.query('pageviews', 'region')
# pageviews as a function of browser
profile.core.query(['pageviews'], ['browser'])
```
The returned query can then be further refined using
all methods available on the `CoreQuery` object, such as
`limit`, `sort`, `segment` and so on.
Metrics and dimensions may be either strings (the column id or
the human-readable column name) or Metric or Dimension
objects.
Metrics and dimensions specified as a string are not case-sensitive.
```python
profile.query('PAGEVIEWS')
```
If specifying only a single metric or dimension, you can
but are not required to wrap it in a list.
"""
_lock = 0
def __init__(self, api, parameters={}, metadata={}, title=None):
self._title = title
self.raw = {
'ids': 'ga:' + api.profile.id,
'metrics': [],
'dimensions': [],
}
self.raw.update(parameters)
self.meta = {}
self.meta.update(metadata)
self.api = api
self.profile = api.profile
self.webproperty = api.profile.webproperty
self.account = api.profile.webproperty.account
self._report = None
# do not execute more than one query per second
def _wait(self):
now = time.time()
elapsed = now - self._lock
wait = max(0, 1 - elapsed)
time.sleep(wait)
self._lock = time.time()
return wait
@property
def endpoint(self):
return self.account.service.data().ga()
def clone(self):
query = self.__class__(
api=self.api,
parameters=deepcopy(self.raw),
metadata=deepcopy(self.meta),
)
return query
@utils.immutable
def set(self, key=None, value=None, **kwargs):
"""
`set` is a way to add raw properties to the request,
for features that this module does not
support or supports incompletely. For convenience's
sake, it will serialize Column objects but will
leave any other kind of value alone.
"""
serialize = partial(self.api.columns.serialize, greedy=False)
if key and value:
self.raw[key] = serialize(value)
elif key or kwargs:
properties = key or kwargs
for key, value in properties.items():
self.raw[key] = serialize(value)
else:
raise ValueError(
"Query#set requires a key and value, a properties dictionary or keyword arguments.")
return self
@utils.immutable
def columns(self, required_type=None, *values):
for column in self.api.columns.normalize(values, wrap=True):
if required_type and required_type != column.type:
raise ValueError('Tried to add {type} but received: {column}'.format(
type=required_type,
column=column,
))
self.raw[column.type + 's'].append(column.id)
return self
# TODO: maybe do something smarter, like {granularity} {metrics}
# by {dimensions} for {segment}, filtered by {filters}.
# First {limit} results from {start} to {end} /
# for {start=end}, sorted by {direction} {sort}.
@property
@property
def title(self):
return self._title or self.description
@title.setter
def title(self, value):
self._title = value
def metrics(self, *metrics):
"""
Return a new query with additional metrics.
```python
query.metrics('pageviews', 'page load time')
```
"""
return self.columns('metric', *metrics)
def dimensions(self, *dimensions):
"""
Return a new query with additional dimensions.
```python
query.dimensions('search term', 'search depth')
```
"""
return self.columns('dimension', *dimensions)
@utils.immutable
def sort(self, *columns, **options):
"""
Return a new query which will produce results sorted by
one or more metrics or dimensions. You may use plain
strings for the columns, or actual `Column`, `Metric`
and `Dimension` objects.
Add a minus in front of the metric (either the string or
the object) to sort in descending order.
```python
# sort using strings
query.sort('pageviews', '-device type')
# alternatively, ask for a descending sort in a keyword argument
query.sort('pageviews', descending=True)
# sort using metric, dimension or column objects
pageviews = profile.core.metrics['pageviews']
query.sort(-pageviews)
```
"""
sorts = self.meta.setdefault('sort', [])
for column in columns:
if isinstance(column, Column):
identifier = column.id
elif isinstance(column, utils.basestring):
descending = column.startswith('-') or options.get('descending', False)
identifier = self.api.columns[column.lstrip('-')].id
else:
raise ValueError("Can only sort on columns or column strings. Received: {}".format(column))
if descending:
sign = '-'
else:
sign = ''
sorts.append(sign + identifier)
self.raw['sort'] = ",".join(sorts)
return self
@utils.immutable
def filter(self, value=None, exclude=False, **selection):
""" Most of the actual functionality lives on the Column
object and the `all` and `any` functions. """
filters = self.meta.setdefault('filters', [])
if value and len(selection):
raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.")
elif value:
value = [value]
elif len(selection):
value = select(self.api.columns, selection, invert=exclude)
filters.append(value)
self.raw['filters'] = utils.paste(filters, ',', ';')
return self
def exclude(self, **selection):
return self.filter(exclude=True, **selection)
def build(self, copy=True):
if copy:
raw = deepcopy(self.raw)
else:
raw = self.raw
raw['metrics'] = ','.join(self.raw['metrics'])
if len(raw['dimensions']):
raw['dimensions'] = ','.join(self.raw['dimensions'])
else:
raw['dimensions'] = None
return raw
@property
def cacheable(self):
start = 'start_date' in self.raw and not utils.date.is_relative(self.raw['start_date'])
end = 'end_date' in self.raw and not utils.date.is_relative(self.raw['end_date'])
return start and end
@property
def signature(self):
query = self.build(copy=False)
standardized_query = sorted(query.items(), key=lambda t: t[0])
serialized_query = json.dumps(standardized_query)
return hashlib.sha1(serialized_query.encode('utf-8')).hexdigest()
def execute(self):
raw = self.build()
if self.api.cache and self.cacheable and self.api.cache.exists(self.signature):
response = self.api.cache.get(raw)
else:
try:
self._wait()
response = self.endpoint.get(**raw).execute()
except Exception as err:
if isinstance(err, TypeError):
width = max(map(len, self.raw.keys()))
raw = [(key.ljust(width), value) for key, value in self.raw.items()]
parameters = utils.paste(raw, '\t', '\n')
diagnostics = utils.format(
"""
{message}
The query you submitted was:
{parameters}
""", message=str(err), parameters=parameters)
raise errors.InvalidRequestError(diagnostics)
else:
raise err
if self.api.cache and self.cacheable:
self.api.cache.set(raw, response)
return Report(response, self)
@property
def report(self):
if not self._report:
self._report = self.get()
return self._report
# lazy-loading shortcuts
def __getattr__(self, name):
# IPython shell display should not trigger lazy-loading
# (arguably this is an IPython issue and not our problem, but let's be pragmatic)
if name == '_ipython_display_':
raise AttributeError('Query objects have no custom IPython display behavior')
elif hasattr(self.report, name):
return getattr(self.report, name)
else:
raise AttributeError("'{cls}' object and its associated 'Report' object have no attribute '{name}'".format(
cls=self.__class__.__name__,
name=name,
))
def __repr__(self):
return "<googleanalytics.query.{} object: {} ({})>".format(self.__class__.__name__, self.title, self.profile.name)
|
debrouwere/google-analytics | googleanalytics/query.py | Query.sort | python | def sort(self, *columns, **options):
sorts = self.meta.setdefault('sort', [])
for column in columns:
if isinstance(column, Column):
identifier = column.id
elif isinstance(column, utils.basestring):
descending = column.startswith('-') or options.get('descending', False)
identifier = self.api.columns[column.lstrip('-')].id
else:
raise ValueError("Can only sort on columns or column strings. Received: {}".format(column))
if descending:
sign = '-'
else:
sign = ''
sorts.append(sign + identifier)
self.raw['sort'] = ",".join(sorts)
return self | Return a new query which will produce results sorted by
one or more metrics or dimensions. You may use plain
strings for the columns, or actual `Column`, `Metric`
and `Dimension` objects.
Add a minus in front of the metric (either the string or
the object) to sort in descending order.
```python
# sort using strings
query.sort('pageviews', '-device type')
# alternatively, ask for a descending sort in a keyword argument
query.sort('pageviews', descending=True)
# sort using metric, dimension or column objects
pageviews = profile.core.metrics['pageviews']
query.sort(-pageviews)
``` | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L515-L556 | null | class Query(object):
"""
Return a query for certain metrics and dimensions.
```python
# pageviews (metric) as a function of geographical region
profile.core.query('pageviews', 'region')
# pageviews as a function of browser
profile.core.query(['pageviews'], ['browser'])
```
The returned query can then be further refined using
all methods available on the `CoreQuery` object, such as
`limit`, `sort`, `segment` and so on.
Metrics and dimensions may be either strings (the column id or
the human-readable column name) or Metric or Dimension
objects.
Metrics and dimensions specified as a string are not case-sensitive.
```python
profile.query('PAGEVIEWS')
```
If specifying only a single metric or dimension, you can
but are not required to wrap it in a list.
"""
_lock = 0
def __init__(self, api, parameters={}, metadata={}, title=None):
self._title = title
self.raw = {
'ids': 'ga:' + api.profile.id,
'metrics': [],
'dimensions': [],
}
self.raw.update(parameters)
self.meta = {}
self.meta.update(metadata)
self.api = api
self.profile = api.profile
self.webproperty = api.profile.webproperty
self.account = api.profile.webproperty.account
self._report = None
# do not execute more than one query per second
def _wait(self):
now = time.time()
elapsed = now - self._lock
wait = max(0, 1 - elapsed)
time.sleep(wait)
self._lock = time.time()
return wait
@property
def endpoint(self):
return self.account.service.data().ga()
def clone(self):
query = self.__class__(
api=self.api,
parameters=deepcopy(self.raw),
metadata=deepcopy(self.meta),
)
return query
@utils.immutable
def set(self, key=None, value=None, **kwargs):
"""
`set` is a way to add raw properties to the request,
for features that this module does not
support or supports incompletely. For convenience's
sake, it will serialize Column objects but will
leave any other kind of value alone.
"""
serialize = partial(self.api.columns.serialize, greedy=False)
if key and value:
self.raw[key] = serialize(value)
elif key or kwargs:
properties = key or kwargs
for key, value in properties.items():
self.raw[key] = serialize(value)
else:
raise ValueError(
"Query#set requires a key and value, a properties dictionary or keyword arguments.")
return self
@utils.immutable
def columns(self, required_type=None, *values):
for column in self.api.columns.normalize(values, wrap=True):
if required_type and required_type != column.type:
raise ValueError('Tried to add {type} but received: {column}'.format(
type=required_type,
column=column,
))
self.raw[column.type + 's'].append(column.id)
return self
# TODO: maybe do something smarter, like {granularity} {metrics}
# by {dimensions} for {segment}, filtered by {filters}.
# First {limit} results from {start} to {end} /
# for {start=end}, sorted by {direction} {sort}.
@property
def description(self):
"""
A list of the metrics this query will ask for.
"""
if 'metrics' in self.raw:
metrics = self.raw['metrics']
head = metrics[0:-1] or metrics[0:1]
text = ", ".join(head)
if len(metrics) > 1:
tail = metrics[-1]
text = text + " and " + tail
else:
text = 'n/a'
return text
@property
def title(self):
return self._title or self.description
@title.setter
def title(self, value):
self._title = value
def metrics(self, *metrics):
"""
Return a new query with additional metrics.
```python
query.metrics('pageviews', 'page load time')
```
"""
return self.columns('metric', *metrics)
def dimensions(self, *dimensions):
"""
Return a new query with additional dimensions.
```python
query.dimensions('search term', 'search depth')
```
"""
return self.columns('dimension', *dimensions)
@utils.immutable
@utils.immutable
def filter(self, value=None, exclude=False, **selection):
""" Most of the actual functionality lives on the Column
object and the `all` and `any` functions. """
filters = self.meta.setdefault('filters', [])
if value and len(selection):
raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.")
elif value:
value = [value]
elif len(selection):
value = select(self.api.columns, selection, invert=exclude)
filters.append(value)
self.raw['filters'] = utils.paste(filters, ',', ';')
return self
def exclude(self, **selection):
return self.filter(exclude=True, **selection)
def build(self, copy=True):
if copy:
raw = deepcopy(self.raw)
else:
raw = self.raw
raw['metrics'] = ','.join(self.raw['metrics'])
if len(raw['dimensions']):
raw['dimensions'] = ','.join(self.raw['dimensions'])
else:
raw['dimensions'] = None
return raw
@property
def cacheable(self):
start = 'start_date' in self.raw and not utils.date.is_relative(self.raw['start_date'])
end = 'end_date' in self.raw and not utils.date.is_relative(self.raw['end_date'])
return start and end
@property
def signature(self):
query = self.build(copy=False)
standardized_query = sorted(query.items(), key=lambda t: t[0])
serialized_query = json.dumps(standardized_query)
return hashlib.sha1(serialized_query.encode('utf-8')).hexdigest()
def execute(self):
raw = self.build()
if self.api.cache and self.cacheable and self.api.cache.exists(self.signature):
response = self.api.cache.get(raw)
else:
try:
self._wait()
response = self.endpoint.get(**raw).execute()
except Exception as err:
if isinstance(err, TypeError):
width = max(map(len, self.raw.keys()))
raw = [(key.ljust(width), value) for key, value in self.raw.items()]
parameters = utils.paste(raw, '\t', '\n')
diagnostics = utils.format(
"""
{message}
The query you submitted was:
{parameters}
""", message=str(err), parameters=parameters)
raise errors.InvalidRequestError(diagnostics)
else:
raise err
if self.api.cache and self.cacheable:
self.api.cache.set(raw, response)
return Report(response, self)
@property
def report(self):
if not self._report:
self._report = self.get()
return self._report
# lazy-loading shortcuts
def __getattr__(self, name):
# IPython shell display should not trigger lazy-loading
# (arguably this is an IPython issue and not our problem, but let's be pragmatic)
if name == '_ipython_display_':
raise AttributeError('Query objects have no custom IPython display behavior')
elif hasattr(self.report, name):
return getattr(self.report, name)
else:
raise AttributeError("'{cls}' object and its associated 'Report' object have no attribute '{name}'".format(
cls=self.__class__.__name__,
name=name,
))
def __repr__(self):
return "<googleanalytics.query.{} object: {} ({})>".format(self.__class__.__name__, self.title, self.profile.name)
|
debrouwere/google-analytics | googleanalytics/query.py | Query.filter | python | def filter(self, value=None, exclude=False, **selection):
filters = self.meta.setdefault('filters', [])
if value and len(selection):
raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.")
elif value:
value = [value]
elif len(selection):
value = select(self.api.columns, selection, invert=exclude)
filters.append(value)
self.raw['filters'] = utils.paste(filters, ',', ';')
return self | Most of the actual functionality lives on the Column
object and the `all` and `any` functions. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L559-L573 | [
"def select(source, selection, invert=False):\n selections = []\n for key, values in selection.items():\n if '__' in key:\n column, method = key.split('__')\n else:\n column = key\n method = 'eq'\n\n if not hasattr(Column, method):\n raise Value... | class Query(object):
"""
Return a query for certain metrics and dimensions.
```python
# pageviews (metric) as a function of geographical region
profile.core.query('pageviews', 'region')
# pageviews as a function of browser
profile.core.query(['pageviews'], ['browser'])
```
The returned query can then be further refined using
all methods available on the `CoreQuery` object, such as
`limit`, `sort`, `segment` and so on.
Metrics and dimensions may be either strings (the column id or
the human-readable column name) or Metric or Dimension
objects.
Metrics and dimensions specified as a string are not case-sensitive.
```python
profile.query('PAGEVIEWS')
```
If specifying only a single metric or dimension, you can
but are not required to wrap it in a list.
"""
_lock = 0
def __init__(self, api, parameters={}, metadata={}, title=None):
self._title = title
self.raw = {
'ids': 'ga:' + api.profile.id,
'metrics': [],
'dimensions': [],
}
self.raw.update(parameters)
self.meta = {}
self.meta.update(metadata)
self.api = api
self.profile = api.profile
self.webproperty = api.profile.webproperty
self.account = api.profile.webproperty.account
self._report = None
# do not execute more than one query per second
def _wait(self):
now = time.time()
elapsed = now - self._lock
wait = max(0, 1 - elapsed)
time.sleep(wait)
self._lock = time.time()
return wait
@property
def endpoint(self):
return self.account.service.data().ga()
def clone(self):
query = self.__class__(
api=self.api,
parameters=deepcopy(self.raw),
metadata=deepcopy(self.meta),
)
return query
@utils.immutable
def set(self, key=None, value=None, **kwargs):
"""
`set` is a way to add raw properties to the request,
for features that this module does not
support or supports incompletely. For convenience's
sake, it will serialize Column objects but will
leave any other kind of value alone.
"""
serialize = partial(self.api.columns.serialize, greedy=False)
if key and value:
self.raw[key] = serialize(value)
elif key or kwargs:
properties = key or kwargs
for key, value in properties.items():
self.raw[key] = serialize(value)
else:
raise ValueError(
"Query#set requires a key and value, a properties dictionary or keyword arguments.")
return self
@utils.immutable
def columns(self, required_type=None, *values):
for column in self.api.columns.normalize(values, wrap=True):
if required_type and required_type != column.type:
raise ValueError('Tried to add {type} but received: {column}'.format(
type=required_type,
column=column,
))
self.raw[column.type + 's'].append(column.id)
return self
# TODO: maybe do something smarter, like {granularity} {metrics}
# by {dimensions} for {segment}, filtered by {filters}.
# First {limit} results from {start} to {end} /
# for {start=end}, sorted by {direction} {sort}.
@property
def description(self):
"""
A list of the metrics this query will ask for.
"""
if 'metrics' in self.raw:
metrics = self.raw['metrics']
head = metrics[0:-1] or metrics[0:1]
text = ", ".join(head)
if len(metrics) > 1:
tail = metrics[-1]
text = text + " and " + tail
else:
text = 'n/a'
return text
@property
def title(self):
return self._title or self.description
@title.setter
def title(self, value):
self._title = value
def metrics(self, *metrics):
"""
Return a new query with additional metrics.
```python
query.metrics('pageviews', 'page load time')
```
"""
return self.columns('metric', *metrics)
def dimensions(self, *dimensions):
"""
Return a new query with additional dimensions.
```python
query.dimensions('search term', 'search depth')
```
"""
return self.columns('dimension', *dimensions)
@utils.immutable
def sort(self, *columns, **options):
"""
Return a new query which will produce results sorted by
one or more metrics or dimensions. You may use plain
strings for the columns, or actual `Column`, `Metric`
and `Dimension` objects.
Add a minus in front of the metric (either the string or
the object) to sort in descending order.
```python
# sort using strings
query.sort('pageviews', '-device type')
# alternatively, ask for a descending sort in a keyword argument
query.sort('pageviews', descending=True)
# sort using metric, dimension or column objects
pageviews = profile.core.metrics['pageviews']
query.sort(-pageviews)
```
"""
sorts = self.meta.setdefault('sort', [])
for column in columns:
if isinstance(column, Column):
identifier = column.id
elif isinstance(column, utils.basestring):
descending = column.startswith('-') or options.get('descending', False)
identifier = self.api.columns[column.lstrip('-')].id
else:
raise ValueError("Can only sort on columns or column strings. Received: {}".format(column))
if descending:
sign = '-'
else:
sign = ''
sorts.append(sign + identifier)
self.raw['sort'] = ",".join(sorts)
return self
@utils.immutable
def exclude(self, **selection):
return self.filter(exclude=True, **selection)
def build(self, copy=True):
if copy:
raw = deepcopy(self.raw)
else:
raw = self.raw
raw['metrics'] = ','.join(self.raw['metrics'])
if len(raw['dimensions']):
raw['dimensions'] = ','.join(self.raw['dimensions'])
else:
raw['dimensions'] = None
return raw
@property
def cacheable(self):
start = 'start_date' in self.raw and not utils.date.is_relative(self.raw['start_date'])
end = 'end_date' in self.raw and not utils.date.is_relative(self.raw['end_date'])
return start and end
@property
def signature(self):
query = self.build(copy=False)
standardized_query = sorted(query.items(), key=lambda t: t[0])
serialized_query = json.dumps(standardized_query)
return hashlib.sha1(serialized_query.encode('utf-8')).hexdigest()
def execute(self):
raw = self.build()
if self.api.cache and self.cacheable and self.api.cache.exists(self.signature):
response = self.api.cache.get(raw)
else:
try:
self._wait()
response = self.endpoint.get(**raw).execute()
except Exception as err:
if isinstance(err, TypeError):
width = max(map(len, self.raw.keys()))
raw = [(key.ljust(width), value) for key, value in self.raw.items()]
parameters = utils.paste(raw, '\t', '\n')
diagnostics = utils.format(
"""
{message}
The query you submitted was:
{parameters}
""", message=str(err), parameters=parameters)
raise errors.InvalidRequestError(diagnostics)
else:
raise err
if self.api.cache and self.cacheable:
self.api.cache.set(raw, response)
return Report(response, self)
@property
def report(self):
if not self._report:
self._report = self.get()
return self._report
# lazy-loading shortcuts
def __getattr__(self, name):
# IPython shell display should not trigger lazy-loading
# (arguably this is an IPython issue and not our problem, but let's be pragmatic)
if name == '_ipython_display_':
raise AttributeError('Query objects have no custom IPython display behavior')
elif hasattr(self.report, name):
return getattr(self.report, name)
else:
raise AttributeError("'{cls}' object and its associated 'Report' object have no attribute '{name}'".format(
cls=self.__class__.__name__,
name=name,
))
def __repr__(self):
return "<googleanalytics.query.{} object: {} ({})>".format(self.__class__.__name__, self.title, self.profile.name)
|
debrouwere/google-analytics | googleanalytics/query.py | CoreQuery.precision | python | def precision(self, precision):
if isinstance(precision, int):
precision = self.PRECISION_LEVELS[precision]
if precision not in self.PRECISION_LEVELS:
levels = ", ".join(self.PRECISION_LEVELS)
raise ValueError("Precision should be one of: " + levels)
if precision != 'DEFAULT':
self.raw.update({'samplingLevel': precision})
return self | For queries that should run faster, you may specify a lower precision,
and for those that need to be more precise, a higher precision:
```python
# faster queries
query.range('2014-01-01', '2014-01-31', precision=0)
query.range('2014-01-01', '2014-01-31', precision='FASTER')
# queries with the default level of precision (usually what you want)
query.range('2014-01-01', '2014-01-31')
query.range('2014-01-01', '2014-01-31', precision=1)
query.range('2014-01-01', '2014-01-31', precision='DEFAULT')
# queries that are more precise
query.range('2014-01-01', '2014-01-31', precision=2)
query.range('2014-01-01', '2014-01-31', precision='HIGHER_PRECISION')
``` | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L702-L731 | null | class CoreQuery(Query):
"""
CoreQuery is the main way through which to produce reports
from data in Google Analytics.
The most important methods are:
* `metrics` and `dimensions` (both of which you can also pass as
lists when creating the query)
* `range` and its shortcuts that have the granularity already set:
`hourly`, `daily`, `weekly`, `monthly`, `yearly`, `total`
* `filter` to filter which rows are analyzed before running the query
* `segment` to filter down to a certain kind of session or user (as
opposed to `filter` which works on individual rows of data)
* `limit` to ask for a subset of results
* `sort` to sort the query
CoreQuery is mostly immutable: wherever possible, methods
return a new query rather than modifying the existing one,
so for example this works as you'd expect it to:
```python
base = profile.query('pageviews')
january = base.daily('2014-01-01', months=1).get()
february = base.daily('2014-02-01', months=1).get()
```
"""
# TODO (?)
# fields
# userIp / quotaUser
# https://developers.google.com/analytics/devguides/reporting/core/v3/reference#q_summary
PRECISION_LEVELS = ('FASTER', 'DEFAULT', 'HIGHER_PRECISION', )
GRANULARITY_LEVELS = ('year', 'month', 'week', 'day', 'hour', )
GRANULARITY_DIMENSIONS = (
'ga:year', 'ga:yearMonth', 'ga:yearWeek',
'ga:date', 'ga:dateHour',
)
@utils.immutable
@utils.immutable
def interval(self, granularity):
"""
Note that if you don't specify a granularity (either through the `interval`
method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
shortcut methods) you will get only a single result, encompassing the
entire date range, per metric.
"""
if granularity == 'total':
return self
if not isinstance(granularity, int):
if granularity in self.GRANULARITY_LEVELS:
granularity = self.GRANULARITY_LEVELS.index(granularity)
else:
levels = ", ".join(self.GRANULARITY_LEVELS)
raise ValueError("Granularity should be one of: lifetime, " + levels)
dimension = self.GRANULARITY_DIMENSIONS[granularity]
self.raw['dimensions'].insert(0, dimension)
return self
@utils.immutable
def range(self, start=None, stop=None, months=0, days=0):
"""
Return a new query that fetches metrics within a certain date range.
```python
query.range('2014-01-01', '2014-06-30')
```
If you don't specify a `stop` argument, the date range will end today. If instead
you meant to fetch just a single day's results, try:
```python
query.range('2014-01-01', days=1)
```
More generally, you can specify that you'd like a certain number of days,
starting from a certain date:
```python
query.range('2014-01-01', months=3)
query.range('2014-01-01', days=28)
```
Note that if you don't specify a granularity (either through the `interval`
method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
shortcut methods) you will get only a single result, encompassing the
entire date range, per metric.
**Note:** it is currently not possible to easily specify that you'd like
to query the last last full week(s), month(s) et cetera.
This will be added sometime in the future.
"""
start, stop = utils.date.range(start, stop, months, days)
self.raw.update({
'start_date': start,
'end_date': stop,
})
return self
@inspector.implements(range)
def hourly(self, *vargs, **kwargs):
return self.interval('hour').range(*vargs, **kwargs)
@inspector.implements(range)
def daily(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by day. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='day'`.
"""
return self.interval('day').range(*vargs, **kwargs)
@inspector.implements(range)
def weekly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by week. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='week'`.
"""
return self.interval('week').range(*vargs, **kwargs)
@inspector.implements(range)
def monthly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by month. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='month'`.
"""
return self.interval('month').range(*vargs, **kwargs)
@inspector.implements(range)
def yearly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by year. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='year'`.
"""
return self.interval('year').range(*vargs, **kwargs)
@inspector.implements(range)
def total(self, *vargs, **kwargs):
return self.range(*vargs, **kwargs)
@utils.immutable
def step(self, maximum):
"""
Return a new query with a maximum amount of results to be returned
in any one request, without implying that we should stop
fetching beyond that limit (unlike `CoreQuery#limit`.)
Useful in debugging pagination functionality.
Perhaps also useful when you want to be able to decide whether to
continue fetching data, based on the data you've already received.
"""
self.raw['max_results'] = maximum
return self
@utils.immutable
def limit(self, *_range):
"""
Return a new query, limited to a certain number of results.
```python
# first 100
query.limit(100)
# 50 to 60
query.limit(50, 10)
```
Please note carefully that Google Analytics uses
1-indexing on its rows.
"""
# uses the same argument order as
# LIMIT in a SQL database
if len(_range) == 2:
start, maximum = _range
else:
start = 1
maximum = _range[0]
self.meta['limit'] = maximum
self.raw.update({
'start_index': start,
'max_results': maximum,
})
return self
@utils.immutable
def segment_sequence(self, followed_by=False, immediately_followed_by=False, first=False):
# sequences are just really hard to "simplify" because so much is possible
if followed_by or immediately_followed_by:
method = 'sequence'
else:
method = 'condition'
raise NotImplementedError()
@utils.immutable
def segment(self, value=None, scope=None, metric_scope=None, **selection):
"""
Return a new query, limited to a segment of all users or sessions.
Accepts segment objects, filtered segment objects and segment names:
```python
query.segment(account.segments['browser'])
query.segment('browser')
query.segment(account.segments['browser'].any('Chrome', 'Firefox'))
```
Segment can also accept a segment expression when you pass
in a `type` argument. The type argument can be either `users`
or `sessions`. This is pretty close to the metal.
```python
# will be translated into `users::condition::perUser::ga:sessions>10`
query.segment('condition::perUser::ga:sessions>10', type='users')
```
See the [Google Analytics dynamic segments documentation][segments]
You can also use the `any`, `all`, `followed_by` and
`immediately_followed_by` functions in this module to
chain together segments.
Everything about how segments get handled is still in flux.
Feel free to propose ideas for a nicer interface on
the [GitHub issues page][issues]
[segments]: https://developers.google.com/analytics/devguides/reporting/core/v3/segments#reference
[issues]: https://github.com/debrouwere/google-analytics/issues
"""
"""
Technical note to self about segments:
* users or sessions
* sequence or condition
* scope (perHit, perSession, perUser -- gte primary scope)
Multiple conditions can be ANDed or ORed together; these two are equivalent
users::condition::ga:revenue>10;ga:sessionDuration>60
users::condition::ga:revenue>10;users::condition::ga:sessionDuration>60
For sequences, prepending ^ means the first part of the sequence has to match
the first session/hit/...
* users and sessions conditions can be combined (but only with AND)
* sequences and conditions can also be combined (but only with AND)
sessions::sequence::ga:browser==Chrome;
condition::perHit::ga:timeOnPage>5
->>
ga:deviceCategory==mobile;ga:revenue>10;
users::sequence::ga:deviceCategory==desktop
->>
ga:deviceCategory=mobile;
ga:revenue>100;
condition::ga:browser==Chrome
Problem: keyword arguments are passed as a dictionary, not an ordered dictionary!
So e.g. this is risky
query.sessions(time_on_page__gt=5, device_category='mobile', followed_by=True)
"""
SCOPES = {
'hits': 'perHit',
'sessions': 'perSession',
'users': 'perUser',
}
segments = self.meta.setdefault('segments', [])
if value and len(selection):
raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.")
elif value:
value = [self.api.segments.serialize(value)]
elif len(selection):
if not scope:
raise ValueError("Scope is required. Choose from: users, sessions.")
if metric_scope:
metric_scope = SCOPES[metric_scope]
value = select(self.api.columns, selection)
value = [[scope, 'condition', metric_scope, condition] for condition in value]
value = ['::'.join(filter(None, condition)) for condition in value]
segments.append(value)
self.raw['segment'] = utils.paste(segments, ',', ';')
return self
def users(self, **kwargs):
return self.segment(scope='users', **kwargs)
def sessions(self, **kwargs):
return self.segment(scope='sessions', **kwargs)
@utils.immutable
def next(self):
"""
Return a new query with a modified `start_index`.
Mainly used internally to paginate through results.
"""
step = self.raw.get('max_results', 1000)
start = self.raw.get('start_index', 1) + step
self.raw['start_index'] = start
return self
def get(self):
"""
Run the query and return a `Report`.
This method transparently handles paginated results, so even for results that
are larger than the maximum amount of rows the Google Analytics API will
return in a single request, or larger than the amount of rows as specified
through `CoreQuery#step`, `get` will leaf through all pages,
concatenate the results and produce a single Report instance.
"""
cursor = self
report = None
is_complete = False
is_enough = False
while not (is_enough or is_complete):
chunk = cursor.execute()
if report:
report.append(chunk.raw[0], cursor)
else:
report = chunk
is_enough = len(report.rows) >= self.meta.get('limit', float('inf'))
is_complete = chunk.is_complete
cursor = cursor.next()
return report
|
debrouwere/google-analytics | googleanalytics/query.py | CoreQuery.interval | python | def interval(self, granularity):
if granularity == 'total':
return self
if not isinstance(granularity, int):
if granularity in self.GRANULARITY_LEVELS:
granularity = self.GRANULARITY_LEVELS.index(granularity)
else:
levels = ", ".join(self.GRANULARITY_LEVELS)
raise ValueError("Granularity should be one of: lifetime, " + levels)
dimension = self.GRANULARITY_DIMENSIONS[granularity]
self.raw['dimensions'].insert(0, dimension)
return self | Note that if you don't specify a granularity (either through the `interval`
method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
shortcut methods) you will get only a single result, encompassing the
entire date range, per metric. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L734-L755 | null | class CoreQuery(Query):
"""
CoreQuery is the main way through which to produce reports
from data in Google Analytics.
The most important methods are:
* `metrics` and `dimensions` (both of which you can also pass as
lists when creating the query)
* `range` and its shortcuts that have the granularity already set:
`hourly`, `daily`, `weekly`, `monthly`, `yearly`, `total`
* `filter` to filter which rows are analyzed before running the query
* `segment` to filter down to a certain kind of session or user (as
opposed to `filter` which works on individual rows of data)
* `limit` to ask for a subset of results
* `sort` to sort the query
CoreQuery is mostly immutable: wherever possible, methods
return a new query rather than modifying the existing one,
so for example this works as you'd expect it to:
```python
base = profile.query('pageviews')
january = base.daily('2014-01-01', months=1).get()
february = base.daily('2014-02-01', months=1).get()
```
"""
# TODO (?)
# fields
# userIp / quotaUser
# https://developers.google.com/analytics/devguides/reporting/core/v3/reference#q_summary
PRECISION_LEVELS = ('FASTER', 'DEFAULT', 'HIGHER_PRECISION', )
GRANULARITY_LEVELS = ('year', 'month', 'week', 'day', 'hour', )
GRANULARITY_DIMENSIONS = (
'ga:year', 'ga:yearMonth', 'ga:yearWeek',
'ga:date', 'ga:dateHour',
)
@utils.immutable
def precision(self, precision):
"""
For queries that should run faster, you may specify a lower precision,
and for those that need to be more precise, a higher precision:
```python
# faster queries
query.range('2014-01-01', '2014-01-31', precision=0)
query.range('2014-01-01', '2014-01-31', precision='FASTER')
# queries with the default level of precision (usually what you want)
query.range('2014-01-01', '2014-01-31')
query.range('2014-01-01', '2014-01-31', precision=1)
query.range('2014-01-01', '2014-01-31', precision='DEFAULT')
# queries that are more precise
query.range('2014-01-01', '2014-01-31', precision=2)
query.range('2014-01-01', '2014-01-31', precision='HIGHER_PRECISION')
```
"""
if isinstance(precision, int):
precision = self.PRECISION_LEVELS[precision]
if precision not in self.PRECISION_LEVELS:
levels = ", ".join(self.PRECISION_LEVELS)
raise ValueError("Precision should be one of: " + levels)
if precision != 'DEFAULT':
self.raw.update({'samplingLevel': precision})
return self
@utils.immutable
@utils.immutable
def range(self, start=None, stop=None, months=0, days=0):
"""
Return a new query that fetches metrics within a certain date range.
```python
query.range('2014-01-01', '2014-06-30')
```
If you don't specify a `stop` argument, the date range will end today. If instead
you meant to fetch just a single day's results, try:
```python
query.range('2014-01-01', days=1)
```
More generally, you can specify that you'd like a certain number of days,
starting from a certain date:
```python
query.range('2014-01-01', months=3)
query.range('2014-01-01', days=28)
```
Note that if you don't specify a granularity (either through the `interval`
method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
shortcut methods) you will get only a single result, encompassing the
entire date range, per metric.
**Note:** it is currently not possible to easily specify that you'd like
to query the last last full week(s), month(s) et cetera.
This will be added sometime in the future.
"""
start, stop = utils.date.range(start, stop, months, days)
self.raw.update({
'start_date': start,
'end_date': stop,
})
return self
@inspector.implements(range)
def hourly(self, *vargs, **kwargs):
return self.interval('hour').range(*vargs, **kwargs)
@inspector.implements(range)
def daily(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by day. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='day'`.
"""
return self.interval('day').range(*vargs, **kwargs)
@inspector.implements(range)
def weekly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by week. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='week'`.
"""
return self.interval('week').range(*vargs, **kwargs)
@inspector.implements(range)
def monthly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by month. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='month'`.
"""
return self.interval('month').range(*vargs, **kwargs)
@inspector.implements(range)
def yearly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by year. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='year'`.
"""
return self.interval('year').range(*vargs, **kwargs)
@inspector.implements(range)
def total(self, *vargs, **kwargs):
return self.range(*vargs, **kwargs)
@utils.immutable
def step(self, maximum):
"""
Return a new query with a maximum amount of results to be returned
in any one request, without implying that we should stop
fetching beyond that limit (unlike `CoreQuery#limit`.)
Useful in debugging pagination functionality.
Perhaps also useful when you want to be able to decide whether to
continue fetching data, based on the data you've already received.
"""
self.raw['max_results'] = maximum
return self
@utils.immutable
def limit(self, *_range):
"""
Return a new query, limited to a certain number of results.
```python
# first 100
query.limit(100)
# 50 to 60
query.limit(50, 10)
```
Please note carefully that Google Analytics uses
1-indexing on its rows.
"""
# uses the same argument order as
# LIMIT in a SQL database
if len(_range) == 2:
start, maximum = _range
else:
start = 1
maximum = _range[0]
self.meta['limit'] = maximum
self.raw.update({
'start_index': start,
'max_results': maximum,
})
return self
@utils.immutable
def segment_sequence(self, followed_by=False, immediately_followed_by=False, first=False):
# sequences are just really hard to "simplify" because so much is possible
if followed_by or immediately_followed_by:
method = 'sequence'
else:
method = 'condition'
raise NotImplementedError()
@utils.immutable
def segment(self, value=None, scope=None, metric_scope=None, **selection):
"""
Return a new query, limited to a segment of all users or sessions.
Accepts segment objects, filtered segment objects and segment names:
```python
query.segment(account.segments['browser'])
query.segment('browser')
query.segment(account.segments['browser'].any('Chrome', 'Firefox'))
```
Segment can also accept a segment expression when you pass
in a `type` argument. The type argument can be either `users`
or `sessions`. This is pretty close to the metal.
```python
# will be translated into `users::condition::perUser::ga:sessions>10`
query.segment('condition::perUser::ga:sessions>10', type='users')
```
See the [Google Analytics dynamic segments documentation][segments]
You can also use the `any`, `all`, `followed_by` and
`immediately_followed_by` functions in this module to
chain together segments.
Everything about how segments get handled is still in flux.
Feel free to propose ideas for a nicer interface on
the [GitHub issues page][issues]
[segments]: https://developers.google.com/analytics/devguides/reporting/core/v3/segments#reference
[issues]: https://github.com/debrouwere/google-analytics/issues
"""
"""
Technical note to self about segments:
* users or sessions
* sequence or condition
* scope (perHit, perSession, perUser -- gte primary scope)
Multiple conditions can be ANDed or ORed together; these two are equivalent
users::condition::ga:revenue>10;ga:sessionDuration>60
users::condition::ga:revenue>10;users::condition::ga:sessionDuration>60
For sequences, prepending ^ means the first part of the sequence has to match
the first session/hit/...
* users and sessions conditions can be combined (but only with AND)
* sequences and conditions can also be combined (but only with AND)
sessions::sequence::ga:browser==Chrome;
condition::perHit::ga:timeOnPage>5
->>
ga:deviceCategory==mobile;ga:revenue>10;
users::sequence::ga:deviceCategory==desktop
->>
ga:deviceCategory=mobile;
ga:revenue>100;
condition::ga:browser==Chrome
Problem: keyword arguments are passed as a dictionary, not an ordered dictionary!
So e.g. this is risky
query.sessions(time_on_page__gt=5, device_category='mobile', followed_by=True)
"""
SCOPES = {
'hits': 'perHit',
'sessions': 'perSession',
'users': 'perUser',
}
segments = self.meta.setdefault('segments', [])
if value and len(selection):
raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.")
elif value:
value = [self.api.segments.serialize(value)]
elif len(selection):
if not scope:
raise ValueError("Scope is required. Choose from: users, sessions.")
if metric_scope:
metric_scope = SCOPES[metric_scope]
value = select(self.api.columns, selection)
value = [[scope, 'condition', metric_scope, condition] for condition in value]
value = ['::'.join(filter(None, condition)) for condition in value]
segments.append(value)
self.raw['segment'] = utils.paste(segments, ',', ';')
return self
def users(self, **kwargs):
return self.segment(scope='users', **kwargs)
def sessions(self, **kwargs):
return self.segment(scope='sessions', **kwargs)
@utils.immutable
def next(self):
"""
Return a new query with a modified `start_index`.
Mainly used internally to paginate through results.
"""
step = self.raw.get('max_results', 1000)
start = self.raw.get('start_index', 1) + step
self.raw['start_index'] = start
return self
def get(self):
"""
Run the query and return a `Report`.
This method transparently handles paginated results, so even for results that
are larger than the maximum amount of rows the Google Analytics API will
return in a single request, or larger than the amount of rows as specified
through `CoreQuery#step`, `get` will leaf through all pages,
concatenate the results and produce a single Report instance.
"""
cursor = self
report = None
is_complete = False
is_enough = False
while not (is_enough or is_complete):
chunk = cursor.execute()
if report:
report.append(chunk.raw[0], cursor)
else:
report = chunk
is_enough = len(report.rows) >= self.meta.get('limit', float('inf'))
is_complete = chunk.is_complete
cursor = cursor.next()
return report
|
debrouwere/google-analytics | googleanalytics/query.py | CoreQuery.range | python | def range(self, start=None, stop=None, months=0, days=0):
start, stop = utils.date.range(start, stop, months, days)
self.raw.update({
'start_date': start,
'end_date': stop,
})
return self | Return a new query that fetches metrics within a certain date range.
```python
query.range('2014-01-01', '2014-06-30')
```
If you don't specify a `stop` argument, the date range will end today. If instead
you meant to fetch just a single day's results, try:
```python
query.range('2014-01-01', days=1)
```
More generally, you can specify that you'd like a certain number of days,
starting from a certain date:
```python
query.range('2014-01-01', months=3)
query.range('2014-01-01', days=28)
```
Note that if you don't specify a granularity (either through the `interval`
method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
shortcut methods) you will get only a single result, encompassing the
entire date range, per metric.
**Note:** it is currently not possible to easily specify that you'd like
to query the last last full week(s), month(s) et cetera.
This will be added sometime in the future. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L758-L798 | [
"def range(start=None, stop=None, months=0, days=0):\n yesterday = datetime.date.today() - relativedelta(days=1)\n start = normalize(start) or yesterday\n stop = normalize(stop)\n is_past = days < 0 or months < 0\n\n if days or months:\n if start and stop:\n raise Exception(\n ... | class CoreQuery(Query):
"""
CoreQuery is the main way through which to produce reports
from data in Google Analytics.
The most important methods are:
* `metrics` and `dimensions` (both of which you can also pass as
lists when creating the query)
* `range` and its shortcuts that have the granularity already set:
`hourly`, `daily`, `weekly`, `monthly`, `yearly`, `total`
* `filter` to filter which rows are analyzed before running the query
* `segment` to filter down to a certain kind of session or user (as
opposed to `filter` which works on individual rows of data)
* `limit` to ask for a subset of results
* `sort` to sort the query
CoreQuery is mostly immutable: wherever possible, methods
return a new query rather than modifying the existing one,
so for example this works as you'd expect it to:
```python
base = profile.query('pageviews')
january = base.daily('2014-01-01', months=1).get()
february = base.daily('2014-02-01', months=1).get()
```
"""
# TODO (?)
# fields
# userIp / quotaUser
# https://developers.google.com/analytics/devguides/reporting/core/v3/reference#q_summary
PRECISION_LEVELS = ('FASTER', 'DEFAULT', 'HIGHER_PRECISION', )
GRANULARITY_LEVELS = ('year', 'month', 'week', 'day', 'hour', )
GRANULARITY_DIMENSIONS = (
'ga:year', 'ga:yearMonth', 'ga:yearWeek',
'ga:date', 'ga:dateHour',
)
@utils.immutable
def precision(self, precision):
"""
For queries that should run faster, you may specify a lower precision,
and for those that need to be more precise, a higher precision:
```python
# faster queries
query.range('2014-01-01', '2014-01-31', precision=0)
query.range('2014-01-01', '2014-01-31', precision='FASTER')
# queries with the default level of precision (usually what you want)
query.range('2014-01-01', '2014-01-31')
query.range('2014-01-01', '2014-01-31', precision=1)
query.range('2014-01-01', '2014-01-31', precision='DEFAULT')
# queries that are more precise
query.range('2014-01-01', '2014-01-31', precision=2)
query.range('2014-01-01', '2014-01-31', precision='HIGHER_PRECISION')
```
"""
if isinstance(precision, int):
precision = self.PRECISION_LEVELS[precision]
if precision not in self.PRECISION_LEVELS:
levels = ", ".join(self.PRECISION_LEVELS)
raise ValueError("Precision should be one of: " + levels)
if precision != 'DEFAULT':
self.raw.update({'samplingLevel': precision})
return self
@utils.immutable
def interval(self, granularity):
"""
Note that if you don't specify a granularity (either through the `interval`
method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
shortcut methods) you will get only a single result, encompassing the
entire date range, per metric.
"""
if granularity == 'total':
return self
if not isinstance(granularity, int):
if granularity in self.GRANULARITY_LEVELS:
granularity = self.GRANULARITY_LEVELS.index(granularity)
else:
levels = ", ".join(self.GRANULARITY_LEVELS)
raise ValueError("Granularity should be one of: lifetime, " + levels)
dimension = self.GRANULARITY_DIMENSIONS[granularity]
self.raw['dimensions'].insert(0, dimension)
return self
@utils.immutable
@inspector.implements(range)
def hourly(self, *vargs, **kwargs):
return self.interval('hour').range(*vargs, **kwargs)
@inspector.implements(range)
def daily(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by day. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='day'`.
"""
return self.interval('day').range(*vargs, **kwargs)
@inspector.implements(range)
def weekly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by week. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='week'`.
"""
return self.interval('week').range(*vargs, **kwargs)
@inspector.implements(range)
def monthly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by month. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='month'`.
"""
return self.interval('month').range(*vargs, **kwargs)
@inspector.implements(range)
def yearly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by year. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='year'`.
"""
return self.interval('year').range(*vargs, **kwargs)
@inspector.implements(range)
def total(self, *vargs, **kwargs):
return self.range(*vargs, **kwargs)
@utils.immutable
def step(self, maximum):
"""
Return a new query with a maximum amount of results to be returned
in any one request, without implying that we should stop
fetching beyond that limit (unlike `CoreQuery#limit`.)
Useful in debugging pagination functionality.
Perhaps also useful when you want to be able to decide whether to
continue fetching data, based on the data you've already received.
"""
self.raw['max_results'] = maximum
return self
@utils.immutable
def limit(self, *_range):
"""
Return a new query, limited to a certain number of results.
```python
# first 100
query.limit(100)
# 50 to 60
query.limit(50, 10)
```
Please note carefully that Google Analytics uses
1-indexing on its rows.
"""
# uses the same argument order as
# LIMIT in a SQL database
if len(_range) == 2:
start, maximum = _range
else:
start = 1
maximum = _range[0]
self.meta['limit'] = maximum
self.raw.update({
'start_index': start,
'max_results': maximum,
})
return self
@utils.immutable
def segment_sequence(self, followed_by=False, immediately_followed_by=False, first=False):
# sequences are just really hard to "simplify" because so much is possible
if followed_by or immediately_followed_by:
method = 'sequence'
else:
method = 'condition'
raise NotImplementedError()
@utils.immutable
def segment(self, value=None, scope=None, metric_scope=None, **selection):
"""
Return a new query, limited to a segment of all users or sessions.
Accepts segment objects, filtered segment objects and segment names:
```python
query.segment(account.segments['browser'])
query.segment('browser')
query.segment(account.segments['browser'].any('Chrome', 'Firefox'))
```
Segment can also accept a segment expression when you pass
in a `type` argument. The type argument can be either `users`
or `sessions`. This is pretty close to the metal.
```python
# will be translated into `users::condition::perUser::ga:sessions>10`
query.segment('condition::perUser::ga:sessions>10', type='users')
```
See the [Google Analytics dynamic segments documentation][segments]
You can also use the `any`, `all`, `followed_by` and
`immediately_followed_by` functions in this module to
chain together segments.
Everything about how segments get handled is still in flux.
Feel free to propose ideas for a nicer interface on
the [GitHub issues page][issues]
[segments]: https://developers.google.com/analytics/devguides/reporting/core/v3/segments#reference
[issues]: https://github.com/debrouwere/google-analytics/issues
"""
"""
Technical note to self about segments:
* users or sessions
* sequence or condition
* scope (perHit, perSession, perUser -- gte primary scope)
Multiple conditions can be ANDed or ORed together; these two are equivalent
users::condition::ga:revenue>10;ga:sessionDuration>60
users::condition::ga:revenue>10;users::condition::ga:sessionDuration>60
For sequences, prepending ^ means the first part of the sequence has to match
the first session/hit/...
* users and sessions conditions can be combined (but only with AND)
* sequences and conditions can also be combined (but only with AND)
sessions::sequence::ga:browser==Chrome;
condition::perHit::ga:timeOnPage>5
->>
ga:deviceCategory==mobile;ga:revenue>10;
users::sequence::ga:deviceCategory==desktop
->>
ga:deviceCategory=mobile;
ga:revenue>100;
condition::ga:browser==Chrome
Problem: keyword arguments are passed as a dictionary, not an ordered dictionary!
So e.g. this is risky
query.sessions(time_on_page__gt=5, device_category='mobile', followed_by=True)
"""
SCOPES = {
'hits': 'perHit',
'sessions': 'perSession',
'users': 'perUser',
}
segments = self.meta.setdefault('segments', [])
if value and len(selection):
raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.")
elif value:
value = [self.api.segments.serialize(value)]
elif len(selection):
if not scope:
raise ValueError("Scope is required. Choose from: users, sessions.")
if metric_scope:
metric_scope = SCOPES[metric_scope]
value = select(self.api.columns, selection)
value = [[scope, 'condition', metric_scope, condition] for condition in value]
value = ['::'.join(filter(None, condition)) for condition in value]
segments.append(value)
self.raw['segment'] = utils.paste(segments, ',', ';')
return self
def users(self, **kwargs):
return self.segment(scope='users', **kwargs)
def sessions(self, **kwargs):
return self.segment(scope='sessions', **kwargs)
@utils.immutable
def next(self):
"""
Return a new query with a modified `start_index`.
Mainly used internally to paginate through results.
"""
step = self.raw.get('max_results', 1000)
start = self.raw.get('start_index', 1) + step
self.raw['start_index'] = start
return self
def get(self):
"""
Run the query and return a `Report`.
This method transparently handles paginated results, so even for results that
are larger than the maximum amount of rows the Google Analytics API will
return in a single request, or larger than the amount of rows as specified
through `CoreQuery#step`, `get` will leaf through all pages,
concatenate the results and produce a single Report instance.
"""
cursor = self
report = None
is_complete = False
is_enough = False
while not (is_enough or is_complete):
chunk = cursor.execute()
if report:
report.append(chunk.raw[0], cursor)
else:
report = chunk
is_enough = len(report.rows) >= self.meta.get('limit', float('inf'))
is_complete = chunk.is_complete
cursor = cursor.next()
return report
|
debrouwere/google-analytics | googleanalytics/query.py | CoreQuery.limit | python | def limit(self, *_range):
# uses the same argument order as
# LIMIT in a SQL database
if len(_range) == 2:
start, maximum = _range
else:
start = 1
maximum = _range[0]
self.meta['limit'] = maximum
self.raw.update({
'start_index': start,
'max_results': maximum,
})
return self | Return a new query, limited to a certain number of results.
```python
# first 100
query.limit(100)
# 50 to 60
query.limit(50, 10)
```
Please note carefully that Google Analytics uses
1-indexing on its rows. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L866-L895 | null | class CoreQuery(Query):
"""
CoreQuery is the main way through which to produce reports
from data in Google Analytics.
The most important methods are:
* `metrics` and `dimensions` (both of which you can also pass as
lists when creating the query)
* `range` and its shortcuts that have the granularity already set:
`hourly`, `daily`, `weekly`, `monthly`, `yearly`, `total`
* `filter` to filter which rows are analyzed before running the query
* `segment` to filter down to a certain kind of session or user (as
opposed to `filter` which works on individual rows of data)
* `limit` to ask for a subset of results
* `sort` to sort the query
CoreQuery is mostly immutable: wherever possible, methods
return a new query rather than modifying the existing one,
so for example this works as you'd expect it to:
```python
base = profile.query('pageviews')
january = base.daily('2014-01-01', months=1).get()
february = base.daily('2014-02-01', months=1).get()
```
"""
# TODO (?)
# fields
# userIp / quotaUser
# https://developers.google.com/analytics/devguides/reporting/core/v3/reference#q_summary
PRECISION_LEVELS = ('FASTER', 'DEFAULT', 'HIGHER_PRECISION', )
GRANULARITY_LEVELS = ('year', 'month', 'week', 'day', 'hour', )
GRANULARITY_DIMENSIONS = (
'ga:year', 'ga:yearMonth', 'ga:yearWeek',
'ga:date', 'ga:dateHour',
)
@utils.immutable
def precision(self, precision):
"""
For queries that should run faster, you may specify a lower precision,
and for those that need to be more precise, a higher precision:
```python
# faster queries
query.range('2014-01-01', '2014-01-31', precision=0)
query.range('2014-01-01', '2014-01-31', precision='FASTER')
# queries with the default level of precision (usually what you want)
query.range('2014-01-01', '2014-01-31')
query.range('2014-01-01', '2014-01-31', precision=1)
query.range('2014-01-01', '2014-01-31', precision='DEFAULT')
# queries that are more precise
query.range('2014-01-01', '2014-01-31', precision=2)
query.range('2014-01-01', '2014-01-31', precision='HIGHER_PRECISION')
```
"""
if isinstance(precision, int):
precision = self.PRECISION_LEVELS[precision]
if precision not in self.PRECISION_LEVELS:
levels = ", ".join(self.PRECISION_LEVELS)
raise ValueError("Precision should be one of: " + levels)
if precision != 'DEFAULT':
self.raw.update({'samplingLevel': precision})
return self
@utils.immutable
def interval(self, granularity):
"""
Note that if you don't specify a granularity (either through the `interval`
method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
shortcut methods) you will get only a single result, encompassing the
entire date range, per metric.
"""
if granularity == 'total':
return self
if not isinstance(granularity, int):
if granularity in self.GRANULARITY_LEVELS:
granularity = self.GRANULARITY_LEVELS.index(granularity)
else:
levels = ", ".join(self.GRANULARITY_LEVELS)
raise ValueError("Granularity should be one of: lifetime, " + levels)
dimension = self.GRANULARITY_DIMENSIONS[granularity]
self.raw['dimensions'].insert(0, dimension)
return self
@utils.immutable
def range(self, start=None, stop=None, months=0, days=0):
"""
Return a new query that fetches metrics within a certain date range.
```python
query.range('2014-01-01', '2014-06-30')
```
If you don't specify a `stop` argument, the date range will end today. If instead
you meant to fetch just a single day's results, try:
```python
query.range('2014-01-01', days=1)
```
More generally, you can specify that you'd like a certain number of days,
starting from a certain date:
```python
query.range('2014-01-01', months=3)
query.range('2014-01-01', days=28)
```
Note that if you don't specify a granularity (either through the `interval`
method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
shortcut methods) you will get only a single result, encompassing the
entire date range, per metric.
**Note:** it is currently not possible to easily specify that you'd like
to query the last last full week(s), month(s) et cetera.
This will be added sometime in the future.
"""
start, stop = utils.date.range(start, stop, months, days)
self.raw.update({
'start_date': start,
'end_date': stop,
})
return self
@inspector.implements(range)
def hourly(self, *vargs, **kwargs):
return self.interval('hour').range(*vargs, **kwargs)
@inspector.implements(range)
def daily(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by day. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='day'`.
"""
return self.interval('day').range(*vargs, **kwargs)
@inspector.implements(range)
def weekly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by week. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='week'`.
"""
return self.interval('week').range(*vargs, **kwargs)
@inspector.implements(range)
def monthly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by month. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='month'`.
"""
return self.interval('month').range(*vargs, **kwargs)
@inspector.implements(range)
def yearly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by year. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='year'`.
"""
return self.interval('year').range(*vargs, **kwargs)
@inspector.implements(range)
def total(self, *vargs, **kwargs):
return self.range(*vargs, **kwargs)
@utils.immutable
def step(self, maximum):
"""
Return a new query with a maximum amount of results to be returned
in any one request, without implying that we should stop
fetching beyond that limit (unlike `CoreQuery#limit`.)
Useful in debugging pagination functionality.
Perhaps also useful when you want to be able to decide whether to
continue fetching data, based on the data you've already received.
"""
self.raw['max_results'] = maximum
return self
@utils.immutable
@utils.immutable
def segment_sequence(self, followed_by=False, immediately_followed_by=False, first=False):
# sequences are just really hard to "simplify" because so much is possible
if followed_by or immediately_followed_by:
method = 'sequence'
else:
method = 'condition'
raise NotImplementedError()
@utils.immutable
def segment(self, value=None, scope=None, metric_scope=None, **selection):
"""
Return a new query, limited to a segment of all users or sessions.
Accepts segment objects, filtered segment objects and segment names:
```python
query.segment(account.segments['browser'])
query.segment('browser')
query.segment(account.segments['browser'].any('Chrome', 'Firefox'))
```
Segment can also accept a segment expression when you pass
in a `type` argument. The type argument can be either `users`
or `sessions`. This is pretty close to the metal.
```python
# will be translated into `users::condition::perUser::ga:sessions>10`
query.segment('condition::perUser::ga:sessions>10', type='users')
```
See the [Google Analytics dynamic segments documentation][segments]
You can also use the `any`, `all`, `followed_by` and
`immediately_followed_by` functions in this module to
chain together segments.
Everything about how segments get handled is still in flux.
Feel free to propose ideas for a nicer interface on
the [GitHub issues page][issues]
[segments]: https://developers.google.com/analytics/devguides/reporting/core/v3/segments#reference
[issues]: https://github.com/debrouwere/google-analytics/issues
"""
"""
Technical note to self about segments:
* users or sessions
* sequence or condition
* scope (perHit, perSession, perUser -- gte primary scope)
Multiple conditions can be ANDed or ORed together; these two are equivalent
users::condition::ga:revenue>10;ga:sessionDuration>60
users::condition::ga:revenue>10;users::condition::ga:sessionDuration>60
For sequences, prepending ^ means the first part of the sequence has to match
the first session/hit/...
* users and sessions conditions can be combined (but only with AND)
* sequences and conditions can also be combined (but only with AND)
sessions::sequence::ga:browser==Chrome;
condition::perHit::ga:timeOnPage>5
->>
ga:deviceCategory==mobile;ga:revenue>10;
users::sequence::ga:deviceCategory==desktop
->>
ga:deviceCategory=mobile;
ga:revenue>100;
condition::ga:browser==Chrome
Problem: keyword arguments are passed as a dictionary, not an ordered dictionary!
So e.g. this is risky
query.sessions(time_on_page__gt=5, device_category='mobile', followed_by=True)
"""
SCOPES = {
'hits': 'perHit',
'sessions': 'perSession',
'users': 'perUser',
}
segments = self.meta.setdefault('segments', [])
if value and len(selection):
raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.")
elif value:
value = [self.api.segments.serialize(value)]
elif len(selection):
if not scope:
raise ValueError("Scope is required. Choose from: users, sessions.")
if metric_scope:
metric_scope = SCOPES[metric_scope]
value = select(self.api.columns, selection)
value = [[scope, 'condition', metric_scope, condition] for condition in value]
value = ['::'.join(filter(None, condition)) for condition in value]
segments.append(value)
self.raw['segment'] = utils.paste(segments, ',', ';')
return self
def users(self, **kwargs):
return self.segment(scope='users', **kwargs)
def sessions(self, **kwargs):
return self.segment(scope='sessions', **kwargs)
@utils.immutable
def next(self):
"""
Return a new query with a modified `start_index`.
Mainly used internally to paginate through results.
"""
step = self.raw.get('max_results', 1000)
start = self.raw.get('start_index', 1) + step
self.raw['start_index'] = start
return self
def get(self):
"""
Run the query and return a `Report`.
This method transparently handles paginated results, so even for results that
are larger than the maximum amount of rows the Google Analytics API will
return in a single request, or larger than the amount of rows as specified
through `CoreQuery#step`, `get` will leaf through all pages,
concatenate the results and produce a single Report instance.
"""
cursor = self
report = None
is_complete = False
is_enough = False
while not (is_enough or is_complete):
chunk = cursor.execute()
if report:
report.append(chunk.raw[0], cursor)
else:
report = chunk
is_enough = len(report.rows) >= self.meta.get('limit', float('inf'))
is_complete = chunk.is_complete
cursor = cursor.next()
return report
|
debrouwere/google-analytics | googleanalytics/query.py | CoreQuery.segment | python | def segment(self, value=None, scope=None, metric_scope=None, **selection):
"""
Technical note to self about segments:
* users or sessions
* sequence or condition
* scope (perHit, perSession, perUser -- gte primary scope)
Multiple conditions can be ANDed or ORed together; these two are equivalent
users::condition::ga:revenue>10;ga:sessionDuration>60
users::condition::ga:revenue>10;users::condition::ga:sessionDuration>60
For sequences, prepending ^ means the first part of the sequence has to match
the first session/hit/...
* users and sessions conditions can be combined (but only with AND)
* sequences and conditions can also be combined (but only with AND)
sessions::sequence::ga:browser==Chrome;
condition::perHit::ga:timeOnPage>5
->>
ga:deviceCategory==mobile;ga:revenue>10;
users::sequence::ga:deviceCategory==desktop
->>
ga:deviceCategory=mobile;
ga:revenue>100;
condition::ga:browser==Chrome
Problem: keyword arguments are passed as a dictionary, not an ordered dictionary!
So e.g. this is risky
query.sessions(time_on_page__gt=5, device_category='mobile', followed_by=True)
"""
SCOPES = {
'hits': 'perHit',
'sessions': 'perSession',
'users': 'perUser',
}
segments = self.meta.setdefault('segments', [])
if value and len(selection):
raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.")
elif value:
value = [self.api.segments.serialize(value)]
elif len(selection):
if not scope:
raise ValueError("Scope is required. Choose from: users, sessions.")
if metric_scope:
metric_scope = SCOPES[metric_scope]
value = select(self.api.columns, selection)
value = [[scope, 'condition', metric_scope, condition] for condition in value]
value = ['::'.join(filter(None, condition)) for condition in value]
segments.append(value)
self.raw['segment'] = utils.paste(segments, ',', ';')
return self | Return a new query, limited to a segment of all users or sessions.
Accepts segment objects, filtered segment objects and segment names:
```python
query.segment(account.segments['browser'])
query.segment('browser')
query.segment(account.segments['browser'].any('Chrome', 'Firefox'))
```
Segment can also accept a segment expression when you pass
in a `type` argument. The type argument can be either `users`
or `sessions`. This is pretty close to the metal.
```python
# will be translated into `users::condition::perUser::ga:sessions>10`
query.segment('condition::perUser::ga:sessions>10', type='users')
```
See the [Google Analytics dynamic segments documentation][segments]
You can also use the `any`, `all`, `followed_by` and
`immediately_followed_by` functions in this module to
chain together segments.
Everything about how segments get handled is still in flux.
Feel free to propose ideas for a nicer interface on
the [GitHub issues page][issues]
[segments]: https://developers.google.com/analytics/devguides/reporting/core/v3/segments#reference
[issues]: https://github.com/debrouwere/google-analytics/issues | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L909-L1003 | [
"def select(source, selection, invert=False):\n selections = []\n for key, values in selection.items():\n if '__' in key:\n column, method = key.split('__')\n else:\n column = key\n method = 'eq'\n\n if not hasattr(Column, method):\n raise Value... | class CoreQuery(Query):
"""
CoreQuery is the main way through which to produce reports
from data in Google Analytics.
The most important methods are:
* `metrics` and `dimensions` (both of which you can also pass as
lists when creating the query)
* `range` and its shortcuts that have the granularity already set:
`hourly`, `daily`, `weekly`, `monthly`, `yearly`, `total`
* `filter` to filter which rows are analyzed before running the query
* `segment` to filter down to a certain kind of session or user (as
opposed to `filter` which works on individual rows of data)
* `limit` to ask for a subset of results
* `sort` to sort the query
CoreQuery is mostly immutable: wherever possible, methods
return a new query rather than modifying the existing one,
so for example this works as you'd expect it to:
```python
base = profile.query('pageviews')
january = base.daily('2014-01-01', months=1).get()
february = base.daily('2014-02-01', months=1).get()
```
"""
# TODO (?)
# fields
# userIp / quotaUser
# https://developers.google.com/analytics/devguides/reporting/core/v3/reference#q_summary
PRECISION_LEVELS = ('FASTER', 'DEFAULT', 'HIGHER_PRECISION', )
GRANULARITY_LEVELS = ('year', 'month', 'week', 'day', 'hour', )
GRANULARITY_DIMENSIONS = (
'ga:year', 'ga:yearMonth', 'ga:yearWeek',
'ga:date', 'ga:dateHour',
)
@utils.immutable
def precision(self, precision):
"""
For queries that should run faster, you may specify a lower precision,
and for those that need to be more precise, a higher precision:
```python
# faster queries
query.range('2014-01-01', '2014-01-31', precision=0)
query.range('2014-01-01', '2014-01-31', precision='FASTER')
# queries with the default level of precision (usually what you want)
query.range('2014-01-01', '2014-01-31')
query.range('2014-01-01', '2014-01-31', precision=1)
query.range('2014-01-01', '2014-01-31', precision='DEFAULT')
# queries that are more precise
query.range('2014-01-01', '2014-01-31', precision=2)
query.range('2014-01-01', '2014-01-31', precision='HIGHER_PRECISION')
```
"""
if isinstance(precision, int):
precision = self.PRECISION_LEVELS[precision]
if precision not in self.PRECISION_LEVELS:
levels = ", ".join(self.PRECISION_LEVELS)
raise ValueError("Precision should be one of: " + levels)
if precision != 'DEFAULT':
self.raw.update({'samplingLevel': precision})
return self
@utils.immutable
def interval(self, granularity):
"""
Note that if you don't specify a granularity (either through the `interval`
method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
shortcut methods) you will get only a single result, encompassing the
entire date range, per metric.
"""
if granularity == 'total':
return self
if not isinstance(granularity, int):
if granularity in self.GRANULARITY_LEVELS:
granularity = self.GRANULARITY_LEVELS.index(granularity)
else:
levels = ", ".join(self.GRANULARITY_LEVELS)
raise ValueError("Granularity should be one of: lifetime, " + levels)
dimension = self.GRANULARITY_DIMENSIONS[granularity]
self.raw['dimensions'].insert(0, dimension)
return self
@utils.immutable
def range(self, start=None, stop=None, months=0, days=0):
"""
Return a new query that fetches metrics within a certain date range.
```python
query.range('2014-01-01', '2014-06-30')
```
If you don't specify a `stop` argument, the date range will end today. If instead
you meant to fetch just a single day's results, try:
```python
query.range('2014-01-01', days=1)
```
More generally, you can specify that you'd like a certain number of days,
starting from a certain date:
```python
query.range('2014-01-01', months=3)
query.range('2014-01-01', days=28)
```
Note that if you don't specify a granularity (either through the `interval`
method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
shortcut methods) you will get only a single result, encompassing the
entire date range, per metric.
**Note:** it is currently not possible to easily specify that you'd like
to query the last last full week(s), month(s) et cetera.
This will be added sometime in the future.
"""
start, stop = utils.date.range(start, stop, months, days)
self.raw.update({
'start_date': start,
'end_date': stop,
})
return self
@inspector.implements(range)
def hourly(self, *vargs, **kwargs):
return self.interval('hour').range(*vargs, **kwargs)
@inspector.implements(range)
def daily(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by day. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='day'`.
"""
return self.interval('day').range(*vargs, **kwargs)
@inspector.implements(range)
def weekly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by week. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='week'`.
"""
return self.interval('week').range(*vargs, **kwargs)
@inspector.implements(range)
def monthly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by month. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='month'`.
"""
return self.interval('month').range(*vargs, **kwargs)
@inspector.implements(range)
def yearly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by year. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='year'`.
"""
return self.interval('year').range(*vargs, **kwargs)
@inspector.implements(range)
def total(self, *vargs, **kwargs):
return self.range(*vargs, **kwargs)
@utils.immutable
def step(self, maximum):
"""
Return a new query with a maximum amount of results to be returned
in any one request, without implying that we should stop
fetching beyond that limit (unlike `CoreQuery#limit`.)
Useful in debugging pagination functionality.
Perhaps also useful when you want to be able to decide whether to
continue fetching data, based on the data you've already received.
"""
self.raw['max_results'] = maximum
return self
@utils.immutable
def limit(self, *_range):
"""
Return a new query, limited to a certain number of results.
```python
# first 100
query.limit(100)
# 50 to 60
query.limit(50, 10)
```
Please note carefully that Google Analytics uses
1-indexing on its rows.
"""
# uses the same argument order as
# LIMIT in a SQL database
if len(_range) == 2:
start, maximum = _range
else:
start = 1
maximum = _range[0]
self.meta['limit'] = maximum
self.raw.update({
'start_index': start,
'max_results': maximum,
})
return self
@utils.immutable
def segment_sequence(self, followed_by=False, immediately_followed_by=False, first=False):
# sequences are just really hard to "simplify" because so much is possible
if followed_by or immediately_followed_by:
method = 'sequence'
else:
method = 'condition'
raise NotImplementedError()
@utils.immutable
def users(self, **kwargs):
return self.segment(scope='users', **kwargs)
def sessions(self, **kwargs):
return self.segment(scope='sessions', **kwargs)
@utils.immutable
def next(self):
"""
Return a new query with a modified `start_index`.
Mainly used internally to paginate through results.
"""
step = self.raw.get('max_results', 1000)
start = self.raw.get('start_index', 1) + step
self.raw['start_index'] = start
return self
def get(self):
"""
Run the query and return a `Report`.
This method transparently handles paginated results, so even for results that
are larger than the maximum amount of rows the Google Analytics API will
return in a single request, or larger than the amount of rows as specified
through `CoreQuery#step`, `get` will leaf through all pages,
concatenate the results and produce a single Report instance.
"""
cursor = self
report = None
is_complete = False
is_enough = False
while not (is_enough or is_complete):
chunk = cursor.execute()
if report:
report.append(chunk.raw[0], cursor)
else:
report = chunk
is_enough = len(report.rows) >= self.meta.get('limit', float('inf'))
is_complete = chunk.is_complete
cursor = cursor.next()
return report
|
debrouwere/google-analytics | googleanalytics/query.py | CoreQuery.next | python | def next(self):
step = self.raw.get('max_results', 1000)
start = self.raw.get('start_index', 1) + step
self.raw['start_index'] = start
return self | Return a new query with a modified `start_index`.
Mainly used internally to paginate through results. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L1012-L1020 | null | class CoreQuery(Query):
"""
CoreQuery is the main way through which to produce reports
from data in Google Analytics.
The most important methods are:
* `metrics` and `dimensions` (both of which you can also pass as
lists when creating the query)
* `range` and its shortcuts that have the granularity already set:
`hourly`, `daily`, `weekly`, `monthly`, `yearly`, `total`
* `filter` to filter which rows are analyzed before running the query
* `segment` to filter down to a certain kind of session or user (as
opposed to `filter` which works on individual rows of data)
* `limit` to ask for a subset of results
* `sort` to sort the query
CoreQuery is mostly immutable: wherever possible, methods
return a new query rather than modifying the existing one,
so for example this works as you'd expect it to:
```python
base = profile.query('pageviews')
january = base.daily('2014-01-01', months=1).get()
february = base.daily('2014-02-01', months=1).get()
```
"""
# TODO (?)
# fields
# userIp / quotaUser
# https://developers.google.com/analytics/devguides/reporting/core/v3/reference#q_summary
PRECISION_LEVELS = ('FASTER', 'DEFAULT', 'HIGHER_PRECISION', )
GRANULARITY_LEVELS = ('year', 'month', 'week', 'day', 'hour', )
GRANULARITY_DIMENSIONS = (
'ga:year', 'ga:yearMonth', 'ga:yearWeek',
'ga:date', 'ga:dateHour',
)
@utils.immutable
def precision(self, precision):
"""
For queries that should run faster, you may specify a lower precision,
and for those that need to be more precise, a higher precision:
```python
# faster queries
query.range('2014-01-01', '2014-01-31', precision=0)
query.range('2014-01-01', '2014-01-31', precision='FASTER')
# queries with the default level of precision (usually what you want)
query.range('2014-01-01', '2014-01-31')
query.range('2014-01-01', '2014-01-31', precision=1)
query.range('2014-01-01', '2014-01-31', precision='DEFAULT')
# queries that are more precise
query.range('2014-01-01', '2014-01-31', precision=2)
query.range('2014-01-01', '2014-01-31', precision='HIGHER_PRECISION')
```
"""
if isinstance(precision, int):
precision = self.PRECISION_LEVELS[precision]
if precision not in self.PRECISION_LEVELS:
levels = ", ".join(self.PRECISION_LEVELS)
raise ValueError("Precision should be one of: " + levels)
if precision != 'DEFAULT':
self.raw.update({'samplingLevel': precision})
return self
@utils.immutable
def interval(self, granularity):
"""
Note that if you don't specify a granularity (either through the `interval`
method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
shortcut methods) you will get only a single result, encompassing the
entire date range, per metric.
"""
if granularity == 'total':
return self
if not isinstance(granularity, int):
if granularity in self.GRANULARITY_LEVELS:
granularity = self.GRANULARITY_LEVELS.index(granularity)
else:
levels = ", ".join(self.GRANULARITY_LEVELS)
raise ValueError("Granularity should be one of: lifetime, " + levels)
dimension = self.GRANULARITY_DIMENSIONS[granularity]
self.raw['dimensions'].insert(0, dimension)
return self
@utils.immutable
def range(self, start=None, stop=None, months=0, days=0):
"""
Return a new query that fetches metrics within a certain date range.
```python
query.range('2014-01-01', '2014-06-30')
```
If you don't specify a `stop` argument, the date range will end today. If instead
you meant to fetch just a single day's results, try:
```python
query.range('2014-01-01', days=1)
```
More generally, you can specify that you'd like a certain number of days,
starting from a certain date:
```python
query.range('2014-01-01', months=3)
query.range('2014-01-01', days=28)
```
Note that if you don't specify a granularity (either through the `interval`
method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
shortcut methods) you will get only a single result, encompassing the
entire date range, per metric.
**Note:** it is currently not possible to easily specify that you'd like
to query the last last full week(s), month(s) et cetera.
This will be added sometime in the future.
"""
start, stop = utils.date.range(start, stop, months, days)
self.raw.update({
'start_date': start,
'end_date': stop,
})
return self
@inspector.implements(range)
def hourly(self, *vargs, **kwargs):
return self.interval('hour').range(*vargs, **kwargs)
@inspector.implements(range)
def daily(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by day. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='day'`.
"""
return self.interval('day').range(*vargs, **kwargs)
@inspector.implements(range)
def weekly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by week. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='week'`.
"""
return self.interval('week').range(*vargs, **kwargs)
@inspector.implements(range)
def monthly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by month. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='month'`.
"""
return self.interval('month').range(*vargs, **kwargs)
@inspector.implements(range)
def yearly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by year. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='year'`.
"""
return self.interval('year').range(*vargs, **kwargs)
@inspector.implements(range)
def total(self, *vargs, **kwargs):
return self.range(*vargs, **kwargs)
@utils.immutable
def step(self, maximum):
"""
Return a new query with a maximum amount of results to be returned
in any one request, without implying that we should stop
fetching beyond that limit (unlike `CoreQuery#limit`.)
Useful in debugging pagination functionality.
Perhaps also useful when you want to be able to decide whether to
continue fetching data, based on the data you've already received.
"""
self.raw['max_results'] = maximum
return self
@utils.immutable
def limit(self, *_range):
"""
Return a new query, limited to a certain number of results.
```python
# first 100
query.limit(100)
# 50 to 60
query.limit(50, 10)
```
Please note carefully that Google Analytics uses
1-indexing on its rows.
"""
# uses the same argument order as
# LIMIT in a SQL database
if len(_range) == 2:
start, maximum = _range
else:
start = 1
maximum = _range[0]
self.meta['limit'] = maximum
self.raw.update({
'start_index': start,
'max_results': maximum,
})
return self
@utils.immutable
def segment_sequence(self, followed_by=False, immediately_followed_by=False, first=False):
# sequences are just really hard to "simplify" because so much is possible
if followed_by or immediately_followed_by:
method = 'sequence'
else:
method = 'condition'
raise NotImplementedError()
@utils.immutable
def segment(self, value=None, scope=None, metric_scope=None, **selection):
"""
Return a new query, limited to a segment of all users or sessions.
Accepts segment objects, filtered segment objects and segment names:
```python
query.segment(account.segments['browser'])
query.segment('browser')
query.segment(account.segments['browser'].any('Chrome', 'Firefox'))
```
Segment can also accept a segment expression when you pass
in a `type` argument. The type argument can be either `users`
or `sessions`. This is pretty close to the metal.
```python
# will be translated into `users::condition::perUser::ga:sessions>10`
query.segment('condition::perUser::ga:sessions>10', type='users')
```
See the [Google Analytics dynamic segments documentation][segments]
You can also use the `any`, `all`, `followed_by` and
`immediately_followed_by` functions in this module to
chain together segments.
Everything about how segments get handled is still in flux.
Feel free to propose ideas for a nicer interface on
the [GitHub issues page][issues]
[segments]: https://developers.google.com/analytics/devguides/reporting/core/v3/segments#reference
[issues]: https://github.com/debrouwere/google-analytics/issues
"""
"""
Technical note to self about segments:
* users or sessions
* sequence or condition
* scope (perHit, perSession, perUser -- gte primary scope)
Multiple conditions can be ANDed or ORed together; these two are equivalent
users::condition::ga:revenue>10;ga:sessionDuration>60
users::condition::ga:revenue>10;users::condition::ga:sessionDuration>60
For sequences, prepending ^ means the first part of the sequence has to match
the first session/hit/...
* users and sessions conditions can be combined (but only with AND)
* sequences and conditions can also be combined (but only with AND)
sessions::sequence::ga:browser==Chrome;
condition::perHit::ga:timeOnPage>5
->>
ga:deviceCategory==mobile;ga:revenue>10;
users::sequence::ga:deviceCategory==desktop
->>
ga:deviceCategory=mobile;
ga:revenue>100;
condition::ga:browser==Chrome
Problem: keyword arguments are passed as a dictionary, not an ordered dictionary!
So e.g. this is risky
query.sessions(time_on_page__gt=5, device_category='mobile', followed_by=True)
"""
SCOPES = {
'hits': 'perHit',
'sessions': 'perSession',
'users': 'perUser',
}
segments = self.meta.setdefault('segments', [])
if value and len(selection):
raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.")
elif value:
value = [self.api.segments.serialize(value)]
elif len(selection):
if not scope:
raise ValueError("Scope is required. Choose from: users, sessions.")
if metric_scope:
metric_scope = SCOPES[metric_scope]
value = select(self.api.columns, selection)
value = [[scope, 'condition', metric_scope, condition] for condition in value]
value = ['::'.join(filter(None, condition)) for condition in value]
segments.append(value)
self.raw['segment'] = utils.paste(segments, ',', ';')
return self
def users(self, **kwargs):
return self.segment(scope='users', **kwargs)
def sessions(self, **kwargs):
return self.segment(scope='sessions', **kwargs)
@utils.immutable
def get(self):
"""
Run the query and return a `Report`.
This method transparently handles paginated results, so even for results that
are larger than the maximum amount of rows the Google Analytics API will
return in a single request, or larger than the amount of rows as specified
through `CoreQuery#step`, `get` will leaf through all pages,
concatenate the results and produce a single Report instance.
"""
cursor = self
report = None
is_complete = False
is_enough = False
while not (is_enough or is_complete):
chunk = cursor.execute()
if report:
report.append(chunk.raw[0], cursor)
else:
report = chunk
is_enough = len(report.rows) >= self.meta.get('limit', float('inf'))
is_complete = chunk.is_complete
cursor = cursor.next()
return report
|
debrouwere/google-analytics | googleanalytics/query.py | CoreQuery.get | python | def get(self):
cursor = self
report = None
is_complete = False
is_enough = False
while not (is_enough or is_complete):
chunk = cursor.execute()
if report:
report.append(chunk.raw[0], cursor)
else:
report = chunk
is_enough = len(report.rows) >= self.meta.get('limit', float('inf'))
is_complete = chunk.is_complete
cursor = cursor.next()
return report | Run the query and return a `Report`.
This method transparently handles paginated results, so even for results that
are larger than the maximum amount of rows the Google Analytics API will
return in a single request, or larger than the amount of rows as specified
through `CoreQuery#step`, `get` will leaf through all pages,
concatenate the results and produce a single Report instance. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L1022-L1050 | [
"def execute(self):\n raw = self.build()\n\n if self.api.cache and self.cacheable and self.api.cache.exists(self.signature):\n response = self.api.cache.get(raw)\n else:\n try:\n self._wait()\n response = self.endpoint.get(**raw).execute()\n except Exception as er... | class CoreQuery(Query):
"""
CoreQuery is the main way through which to produce reports
from data in Google Analytics.
The most important methods are:
* `metrics` and `dimensions` (both of which you can also pass as
lists when creating the query)
* `range` and its shortcuts that have the granularity already set:
`hourly`, `daily`, `weekly`, `monthly`, `yearly`, `total`
* `filter` to filter which rows are analyzed before running the query
* `segment` to filter down to a certain kind of session or user (as
opposed to `filter` which works on individual rows of data)
* `limit` to ask for a subset of results
* `sort` to sort the query
CoreQuery is mostly immutable: wherever possible, methods
return a new query rather than modifying the existing one,
so for example this works as you'd expect it to:
```python
base = profile.query('pageviews')
january = base.daily('2014-01-01', months=1).get()
february = base.daily('2014-02-01', months=1).get()
```
"""
# TODO (?)
# fields
# userIp / quotaUser
# https://developers.google.com/analytics/devguides/reporting/core/v3/reference#q_summary
PRECISION_LEVELS = ('FASTER', 'DEFAULT', 'HIGHER_PRECISION', )
GRANULARITY_LEVELS = ('year', 'month', 'week', 'day', 'hour', )
GRANULARITY_DIMENSIONS = (
'ga:year', 'ga:yearMonth', 'ga:yearWeek',
'ga:date', 'ga:dateHour',
)
@utils.immutable
def precision(self, precision):
"""
For queries that should run faster, you may specify a lower precision,
and for those that need to be more precise, a higher precision:
```python
# faster queries
query.range('2014-01-01', '2014-01-31', precision=0)
query.range('2014-01-01', '2014-01-31', precision='FASTER')
# queries with the default level of precision (usually what you want)
query.range('2014-01-01', '2014-01-31')
query.range('2014-01-01', '2014-01-31', precision=1)
query.range('2014-01-01', '2014-01-31', precision='DEFAULT')
# queries that are more precise
query.range('2014-01-01', '2014-01-31', precision=2)
query.range('2014-01-01', '2014-01-31', precision='HIGHER_PRECISION')
```
"""
if isinstance(precision, int):
precision = self.PRECISION_LEVELS[precision]
if precision not in self.PRECISION_LEVELS:
levels = ", ".join(self.PRECISION_LEVELS)
raise ValueError("Precision should be one of: " + levels)
if precision != 'DEFAULT':
self.raw.update({'samplingLevel': precision})
return self
@utils.immutable
def interval(self, granularity):
"""
Note that if you don't specify a granularity (either through the `interval`
method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
shortcut methods) you will get only a single result, encompassing the
entire date range, per metric.
"""
if granularity == 'total':
return self
if not isinstance(granularity, int):
if granularity in self.GRANULARITY_LEVELS:
granularity = self.GRANULARITY_LEVELS.index(granularity)
else:
levels = ", ".join(self.GRANULARITY_LEVELS)
raise ValueError("Granularity should be one of: lifetime, " + levels)
dimension = self.GRANULARITY_DIMENSIONS[granularity]
self.raw['dimensions'].insert(0, dimension)
return self
@utils.immutable
def range(self, start=None, stop=None, months=0, days=0):
"""
Return a new query that fetches metrics within a certain date range.
```python
query.range('2014-01-01', '2014-06-30')
```
If you don't specify a `stop` argument, the date range will end today. If instead
you meant to fetch just a single day's results, try:
```python
query.range('2014-01-01', days=1)
```
More generally, you can specify that you'd like a certain number of days,
starting from a certain date:
```python
query.range('2014-01-01', months=3)
query.range('2014-01-01', days=28)
```
Note that if you don't specify a granularity (either through the `interval`
method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
shortcut methods) you will get only a single result, encompassing the
entire date range, per metric.
**Note:** it is currently not possible to easily specify that you'd like
to query the last last full week(s), month(s) et cetera.
This will be added sometime in the future.
"""
start, stop = utils.date.range(start, stop, months, days)
self.raw.update({
'start_date': start,
'end_date': stop,
})
return self
@inspector.implements(range)
def hourly(self, *vargs, **kwargs):
return self.interval('hour').range(*vargs, **kwargs)
@inspector.implements(range)
def daily(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by day. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='day'`.
"""
return self.interval('day').range(*vargs, **kwargs)
@inspector.implements(range)
def weekly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by week. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='week'`.
"""
return self.interval('week').range(*vargs, **kwargs)
@inspector.implements(range)
def monthly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by month. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='month'`.
"""
return self.interval('month').range(*vargs, **kwargs)
@inspector.implements(range)
def yearly(self, *vargs, **kwargs):
"""
Return a new query that fetches metrics within a certain date
range, summarized by year. This method is identical to
`CoreQuery#range` but it sets the default granularity to
`granularity='year'`.
"""
return self.interval('year').range(*vargs, **kwargs)
@inspector.implements(range)
def total(self, *vargs, **kwargs):
return self.range(*vargs, **kwargs)
@utils.immutable
def step(self, maximum):
"""
Return a new query with a maximum amount of results to be returned
in any one request, without implying that we should stop
fetching beyond that limit (unlike `CoreQuery#limit`.)
Useful in debugging pagination functionality.
Perhaps also useful when you want to be able to decide whether to
continue fetching data, based on the data you've already received.
"""
self.raw['max_results'] = maximum
return self
@utils.immutable
def limit(self, *_range):
"""
Return a new query, limited to a certain number of results.
```python
# first 100
query.limit(100)
# 50 to 60
query.limit(50, 10)
```
Please note carefully that Google Analytics uses
1-indexing on its rows.
"""
# uses the same argument order as
# LIMIT in a SQL database
if len(_range) == 2:
start, maximum = _range
else:
start = 1
maximum = _range[0]
self.meta['limit'] = maximum
self.raw.update({
'start_index': start,
'max_results': maximum,
})
return self
@utils.immutable
def segment_sequence(self, followed_by=False, immediately_followed_by=False, first=False):
# sequences are just really hard to "simplify" because so much is possible
if followed_by or immediately_followed_by:
method = 'sequence'
else:
method = 'condition'
raise NotImplementedError()
@utils.immutable
def segment(self, value=None, scope=None, metric_scope=None, **selection):
"""
Return a new query, limited to a segment of all users or sessions.
Accepts segment objects, filtered segment objects and segment names:
```python
query.segment(account.segments['browser'])
query.segment('browser')
query.segment(account.segments['browser'].any('Chrome', 'Firefox'))
```
Segment can also accept a segment expression when you pass
in a `type` argument. The type argument can be either `users`
or `sessions`. This is pretty close to the metal.
```python
# will be translated into `users::condition::perUser::ga:sessions>10`
query.segment('condition::perUser::ga:sessions>10', type='users')
```
See the [Google Analytics dynamic segments documentation][segments]
You can also use the `any`, `all`, `followed_by` and
`immediately_followed_by` functions in this module to
chain together segments.
Everything about how segments get handled is still in flux.
Feel free to propose ideas for a nicer interface on
the [GitHub issues page][issues]
[segments]: https://developers.google.com/analytics/devguides/reporting/core/v3/segments#reference
[issues]: https://github.com/debrouwere/google-analytics/issues
"""
"""
Technical note to self about segments:
* users or sessions
* sequence or condition
* scope (perHit, perSession, perUser -- gte primary scope)
Multiple conditions can be ANDed or ORed together; these two are equivalent
users::condition::ga:revenue>10;ga:sessionDuration>60
users::condition::ga:revenue>10;users::condition::ga:sessionDuration>60
For sequences, prepending ^ means the first part of the sequence has to match
the first session/hit/...
* users and sessions conditions can be combined (but only with AND)
* sequences and conditions can also be combined (but only with AND)
sessions::sequence::ga:browser==Chrome;
condition::perHit::ga:timeOnPage>5
->>
ga:deviceCategory==mobile;ga:revenue>10;
users::sequence::ga:deviceCategory==desktop
->>
ga:deviceCategory=mobile;
ga:revenue>100;
condition::ga:browser==Chrome
Problem: keyword arguments are passed as a dictionary, not an ordered dictionary!
So e.g. this is risky
query.sessions(time_on_page__gt=5, device_category='mobile', followed_by=True)
"""
SCOPES = {
'hits': 'perHit',
'sessions': 'perSession',
'users': 'perUser',
}
segments = self.meta.setdefault('segments', [])
if value and len(selection):
raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.")
elif value:
value = [self.api.segments.serialize(value)]
elif len(selection):
if not scope:
raise ValueError("Scope is required. Choose from: users, sessions.")
if metric_scope:
metric_scope = SCOPES[metric_scope]
value = select(self.api.columns, selection)
value = [[scope, 'condition', metric_scope, condition] for condition in value]
value = ['::'.join(filter(None, condition)) for condition in value]
segments.append(value)
self.raw['segment'] = utils.paste(segments, ',', ';')
return self
def users(self, **kwargs):
return self.segment(scope='users', **kwargs)
def sessions(self, **kwargs):
return self.segment(scope='sessions', **kwargs)
@utils.immutable
def next(self):
"""
Return a new query with a modified `start_index`.
Mainly used internally to paginate through results.
"""
step = self.raw.get('max_results', 1000)
start = self.raw.get('start_index', 1) + step
self.raw['start_index'] = start
return self
|
debrouwere/google-analytics | googleanalytics/query.py | RealTimeQuery.limit | python | def limit(self, maximum):
self.meta['limit'] = maximum
self.raw.update({
'max_results': maximum,
})
return self | Return a new query, limited to a certain number of results.
Unlike core reporting queries, you cannot specify a starting
point for live queries, just the maximum results returned.
```python
# first 50
query.limit(50)
``` | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L1068-L1085 | null | class RealTimeQuery(Query):
"""
A query against the [Google Analytics Real Time API][realtime].
**Note:** brand new! Please test and submit any issues to GitHub.
[realtime]: https://developers.google.com/analytics/devguides/reporting/realtime/v3/reference/data/realtime#resource
"""
@property
def endpoint(self):
return self.account.service.data().realtime()
@utils.immutable
def get(self):
return self.execute()
|
debrouwere/google-analytics | googleanalytics/auth/credentials.py | Credentials.valid | python | def valid(self):
two_legged = self.client_email and self.private_key
three_legged = self.client_id and self.client_secret
return two_legged or three_legged or False | Valid credentials are not necessarily correct, but
they contain all necessary information for an
authentication attempt. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/auth/credentials.py#L136-L142 | null | class Credentials(object):
STRATEGIES = {
'params': from_params,
'keyring': from_keyring,
'environment': from_environment,
'prompt': from_prompt,
}
INTERACTIVE_STRATEGIES = ['params', 'keyring', 'environment', 'prompt']
UNSUPERVISED_STRATEGIES = ['params', 'keyring', 'environment']
@classmethod
def find(cls, interactive=False, valid=False, complete=False, **params):
if interactive:
strategies = copy(cls.INTERACTIVE_STRATEGIES)
else:
strategies = copy(cls.UNSUPERVISED_STRATEGIES)
attempted = ", ".join(strategies)
credentials = cls()
while credentials.incomplete and len(strategies):
strategy = strategies.pop(0)
properties = cls.STRATEGIES[strategy](**params) or {}
for key, value in properties.items():
if not getattr(credentials, key):
setattr(credentials, key, value)
if not params.get(key):
params[key] = value
# the environment variable suffix is often a good
# descriptor of the nature of these credentials,
# when lacking anything better
if params.get('identity'):
credentials.identity = params['identity']
elif params.get('suffix') and credentials.identity is credentials.client_id:
credentials.identity = params.get('suffix')
if complete and credentials.incomplete:
raise KeyError("Could not find client credentials and token. Tried {attempted}.".format(
attempted=attempted))
elif valid and credentials.invalid:
raise KeyError("Could not find client id and client secret. Tried {attempted}.".format(
attempted=attempted))
else:
return credentials
def __init__(self, client_id=None, client_secret=None,
client_email=None, private_key=None,
access_token=None, refresh_token=None,
identity=None):
self.client_id = client_id
self.client_secret = client_secret
self.client_email = client_email
self.private_key = private_key
self.access_token = access_token
self.refresh_token = refresh_token
self._identity = identity
@property
def token(self):
return self.refresh_token or self.access_token
@property
def identity(self):
return self._identity or self.client_id
@identity.setter
def identity(self, value):
self._identity = value
@property
def type(self):
if self.client_email and self.private_key:
return 2
elif self.client_id and self.client_secret:
return 3
else:
return None
@property
@property
def invalid(self):
return not self.valid
@property
def complete(self):
""" Complete credentials are valid and are either two-legged or include a token. """
return self.valid and (self.access_token or self.refresh_token or self.type == 2)
@property
def incomplete(self):
return not self.complete
@property
def oauth(self):
if self.incomplete:
return None
else:
if self.type == 2:
return oauth2client.client.SignedJwtAssertionCredentials(
service_account_name=self.client_email,
private_key=self.private_key.encode('utf-8'),
scope='https://www.googleapis.com/auth/analytics.readonly',
)
else:
return oauth2client.client.OAuth2Credentials(
access_token=self.access_token,
client_id=self.client_id,
client_secret=self.client_secret,
refresh_token=self.refresh_token,
token_expiry=None,
token_uri=oauth2client.GOOGLE_TOKEN_URI,
user_agent=None,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
id_token=None,
token_response=None
)
def serialize(self):
return {
'identity': self.identity,
'client_id': self.client_id,
'client_secret': self.client_secret,
'client_email': self.client_email,
'private_key': self.private_key,
'access_token': self.access_token,
'refresh_token': self.refresh_token,
}
def authorize(self):
return self.oauth.authorize(httplib2.Http())
def revoke(self):
if not self.token:
raise KeyError("Cannot revoke a token when no token was provided.")
# `credentials.revoke` will try to revoke the refresh token even
# if it's None, which will fail, so we have to miss with the innards
# of oauth2client here a little bit
return self.oauth._do_revoke(httplib2.Http().request, self.token)
|
debrouwere/google-analytics | googleanalytics/auth/credentials.py | Credentials.complete | python | def complete(self):
return self.valid and (self.access_token or self.refresh_token or self.type == 2) | Complete credentials are valid and are either two-legged or include a token. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/auth/credentials.py#L149-L151 | null | class Credentials(object):
STRATEGIES = {
'params': from_params,
'keyring': from_keyring,
'environment': from_environment,
'prompt': from_prompt,
}
INTERACTIVE_STRATEGIES = ['params', 'keyring', 'environment', 'prompt']
UNSUPERVISED_STRATEGIES = ['params', 'keyring', 'environment']
@classmethod
def find(cls, interactive=False, valid=False, complete=False, **params):
if interactive:
strategies = copy(cls.INTERACTIVE_STRATEGIES)
else:
strategies = copy(cls.UNSUPERVISED_STRATEGIES)
attempted = ", ".join(strategies)
credentials = cls()
while credentials.incomplete and len(strategies):
strategy = strategies.pop(0)
properties = cls.STRATEGIES[strategy](**params) or {}
for key, value in properties.items():
if not getattr(credentials, key):
setattr(credentials, key, value)
if not params.get(key):
params[key] = value
# the environment variable suffix is often a good
# descriptor of the nature of these credentials,
# when lacking anything better
if params.get('identity'):
credentials.identity = params['identity']
elif params.get('suffix') and credentials.identity is credentials.client_id:
credentials.identity = params.get('suffix')
if complete and credentials.incomplete:
raise KeyError("Could not find client credentials and token. Tried {attempted}.".format(
attempted=attempted))
elif valid and credentials.invalid:
raise KeyError("Could not find client id and client secret. Tried {attempted}.".format(
attempted=attempted))
else:
return credentials
def __init__(self, client_id=None, client_secret=None,
client_email=None, private_key=None,
access_token=None, refresh_token=None,
identity=None):
self.client_id = client_id
self.client_secret = client_secret
self.client_email = client_email
self.private_key = private_key
self.access_token = access_token
self.refresh_token = refresh_token
self._identity = identity
@property
def token(self):
return self.refresh_token or self.access_token
@property
def identity(self):
return self._identity or self.client_id
@identity.setter
def identity(self, value):
self._identity = value
@property
def type(self):
if self.client_email and self.private_key:
return 2
elif self.client_id and self.client_secret:
return 3
else:
return None
@property
def valid(self):
""" Valid credentials are not necessarily correct, but
they contain all necessary information for an
authentication attempt. """
two_legged = self.client_email and self.private_key
three_legged = self.client_id and self.client_secret
return two_legged or three_legged or False
@property
def invalid(self):
return not self.valid
@property
@property
def incomplete(self):
return not self.complete
@property
def oauth(self):
if self.incomplete:
return None
else:
if self.type == 2:
return oauth2client.client.SignedJwtAssertionCredentials(
service_account_name=self.client_email,
private_key=self.private_key.encode('utf-8'),
scope='https://www.googleapis.com/auth/analytics.readonly',
)
else:
return oauth2client.client.OAuth2Credentials(
access_token=self.access_token,
client_id=self.client_id,
client_secret=self.client_secret,
refresh_token=self.refresh_token,
token_expiry=None,
token_uri=oauth2client.GOOGLE_TOKEN_URI,
user_agent=None,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
id_token=None,
token_response=None
)
def serialize(self):
return {
'identity': self.identity,
'client_id': self.client_id,
'client_secret': self.client_secret,
'client_email': self.client_email,
'private_key': self.private_key,
'access_token': self.access_token,
'refresh_token': self.refresh_token,
}
def authorize(self):
return self.oauth.authorize(httplib2.Http())
def revoke(self):
if not self.token:
raise KeyError("Cannot revoke a token when no token was provided.")
# `credentials.revoke` will try to revoke the refresh token even
# if it's None, which will fail, so we have to miss with the innards
# of oauth2client here a little bit
return self.oauth._do_revoke(httplib2.Http().request, self.token)
|
debrouwere/google-analytics | googleanalytics/auth/__init__.py | authenticate | python | def authenticate(
client_id=None, client_secret=None,
client_email=None, private_key=None,
access_token=None, refresh_token=None,
account=None, webproperty=None, profile=None,
identity=None, prefix=None, suffix=None,
interactive=False, save=False):
credentials = oauth.Credentials.find(
valid=True,
interactive=interactive,
prefix=prefix,
suffix=suffix,
client_id=client_id,
client_secret=client_secret,
client_email=client_email,
private_key=private_key,
access_token=access_token,
refresh_token=refresh_token,
identity=identity,
)
if credentials.incomplete:
if interactive:
credentials = authorize(
client_id=credentials.client_id,
client_secret=credentials.client_secret,
save=save,
identity=credentials.identity,
prefix=prefix,
suffix=suffix,
)
elif credentials.type == 2:
credentials = authorize(
client_email=credentials.client_email,
private_key=credentials.private_key,
identity=credentials.identity,
save=save,
)
else:
raise KeyError("Cannot authenticate: enable interactive authorization, pass a token or use a service account.")
accounts = oauth.authenticate(credentials)
scope = navigate(accounts, account=account, webproperty=webproperty, profile=profile)
return scope | The `authenticate` function will authenticate the user with the Google Analytics API,
using a variety of strategies: keyword arguments provided to this function, credentials
stored in in environment variables, credentials stored in the keychain and, finally, by
asking for missing information interactively in a command-line prompt.
If necessary (but only if `interactive=True`) this function will also allow the user
to authorize this Python module to access Google Analytics data on their behalf,
using an OAuth2 token. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/auth/__init__.py#L43-L97 | [
"def navigate(accounts, account=None, webproperty=None, profile=None, default_profile=True):\n if webproperty and not account:\n raise KeyError(\"Cannot navigate to a webproperty or profile without knowing the account.\")\n if profile and not (webproperty and account):\n raise KeyError(\"Cannot ... | # encoding: utf-8
"""
Convenience functions for authenticating with Google
and asking for authorization with Google, with
`authenticate` at its core.
`authenticate` will do what it says on the tin, but unlike
the basic `googleanalytics.oauth.authenticate`, it also tries
to get existing credentials from the keyring, from environment
variables, it prompts for information when required and so on.
"""
from . import keyring
from . import oauth
from .oauth import Flow, Credentials
def navigate(accounts, account=None, webproperty=None, profile=None, default_profile=True):
if webproperty and not account:
raise KeyError("Cannot navigate to a webproperty or profile without knowing the account.")
if profile and not (webproperty and account):
raise KeyError("Cannot navigate to a profile without knowing account and webproperty.")
if profile:
return accounts[account].webproperties[webproperty].profiles[profile]
elif webproperty:
scope = accounts[account].webproperties[webproperty]
if default_profile:
return scope.profile
else:
return scope
elif account:
return accounts[account]
else:
return accounts
def find(**kwargs):
return oauth.Credentials.find(**kwargs)
def identity(name):
return find(identity=name)
def authenticate(
client_id=None, client_secret=None,
client_email=None, private_key=None,
access_token=None, refresh_token=None,
account=None, webproperty=None, profile=None,
identity=None, prefix=None, suffix=None,
interactive=False, save=False):
"""
The `authenticate` function will authenticate the user with the Google Analytics API,
using a variety of strategies: keyword arguments provided to this function, credentials
stored in in environment variables, credentials stored in the keychain and, finally, by
asking for missing information interactively in a command-line prompt.
If necessary (but only if `interactive=True`) this function will also allow the user
to authorize this Python module to access Google Analytics data on their behalf,
using an OAuth2 token.
"""
credentials = oauth.Credentials.find(
valid=True,
interactive=interactive,
prefix=prefix,
suffix=suffix,
client_id=client_id,
client_secret=client_secret,
client_email=client_email,
private_key=private_key,
access_token=access_token,
refresh_token=refresh_token,
identity=identity,
)
if credentials.incomplete:
if interactive:
credentials = authorize(
client_id=credentials.client_id,
client_secret=credentials.client_secret,
save=save,
identity=credentials.identity,
prefix=prefix,
suffix=suffix,
)
elif credentials.type == 2:
credentials = authorize(
client_email=credentials.client_email,
private_key=credentials.private_key,
identity=credentials.identity,
save=save,
)
else:
raise KeyError("Cannot authenticate: enable interactive authorization, pass a token or use a service account.")
accounts = oauth.authenticate(credentials)
scope = navigate(accounts, account=account, webproperty=webproperty, profile=profile)
return scope
def authorize(client_id=None, client_secret=None, client_email=None, private_key=None, save=False, identity=None, prefix=None, suffix=None):
base_credentials = oauth.Credentials.find(
valid=True,
interactive=True,
identity=identity,
client_id=client_id,
client_secret=client_secret,
client_email=client_email,
private_key=private_key,
prefix=prefix,
suffix=suffix,
)
if base_credentials.incomplete:
credentials = oauth.authorize(base_credentials.client_id, base_credentials.client_secret)
credentials.identity = base_credentials.identity
else:
credentials = base_credentials
if save:
keyring.set(credentials.identity, credentials.serialize())
return credentials
def revoke(client_id, client_secret,
client_email=None, private_key=None,
access_token=None, refresh_token=None,
identity=None, prefix=None, suffix=None):
"""
Given a client id, client secret and either an access token or a refresh token,
revoke OAuth access to the Google Analytics data and remove any stored credentials
that use these tokens.
"""
if client_email and private_key:
raise ValueError('Two-legged OAuth does not use revokable tokens.')
credentials = oauth.Credentials.find(
complete=True,
interactive=False,
identity=identity,
client_id=client_id,
client_secret=client_secret,
access_token=access_token,
refresh_token=refresh_token,
prefix=prefix,
suffix=suffix,
)
retval = credentials.revoke()
keyring.delete(credentials.identity)
return retval
|
debrouwere/google-analytics | googleanalytics/auth/__init__.py | revoke | python | def revoke(client_id, client_secret,
client_email=None, private_key=None,
access_token=None, refresh_token=None,
identity=None, prefix=None, suffix=None):
if client_email and private_key:
raise ValueError('Two-legged OAuth does not use revokable tokens.')
credentials = oauth.Credentials.find(
complete=True,
interactive=False,
identity=identity,
client_id=client_id,
client_secret=client_secret,
access_token=access_token,
refresh_token=refresh_token,
prefix=prefix,
suffix=suffix,
)
retval = credentials.revoke()
keyring.delete(credentials.identity)
return retval | Given a client id, client secret and either an access token or a refresh token,
revoke OAuth access to the Google Analytics data and remove any stored credentials
that use these tokens. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/auth/__init__.py#L123-L151 | [
"def delete(name):\n keyring.delete_password(DOMAIN, name)\n",
"def find(cls, interactive=False, valid=False, complete=False, **params):\n if interactive:\n strategies = copy(cls.INTERACTIVE_STRATEGIES)\n else:\n strategies = copy(cls.UNSUPERVISED_STRATEGIES)\n\n attempted = \", \".join(... | # encoding: utf-8
"""
Convenience functions for authenticating with Google
and asking for authorization with Google, with
`authenticate` at its core.
`authenticate` will do what it says on the tin, but unlike
the basic `googleanalytics.oauth.authenticate`, it also tries
to get existing credentials from the keyring, from environment
variables, it prompts for information when required and so on.
"""
from . import keyring
from . import oauth
from .oauth import Flow, Credentials
def navigate(accounts, account=None, webproperty=None, profile=None, default_profile=True):
if webproperty and not account:
raise KeyError("Cannot navigate to a webproperty or profile without knowing the account.")
if profile and not (webproperty and account):
raise KeyError("Cannot navigate to a profile without knowing account and webproperty.")
if profile:
return accounts[account].webproperties[webproperty].profiles[profile]
elif webproperty:
scope = accounts[account].webproperties[webproperty]
if default_profile:
return scope.profile
else:
return scope
elif account:
return accounts[account]
else:
return accounts
def find(**kwargs):
return oauth.Credentials.find(**kwargs)
def identity(name):
return find(identity=name)
def authenticate(
client_id=None, client_secret=None,
client_email=None, private_key=None,
access_token=None, refresh_token=None,
account=None, webproperty=None, profile=None,
identity=None, prefix=None, suffix=None,
interactive=False, save=False):
"""
The `authenticate` function will authenticate the user with the Google Analytics API,
using a variety of strategies: keyword arguments provided to this function, credentials
stored in in environment variables, credentials stored in the keychain and, finally, by
asking for missing information interactively in a command-line prompt.
If necessary (but only if `interactive=True`) this function will also allow the user
to authorize this Python module to access Google Analytics data on their behalf,
using an OAuth2 token.
"""
credentials = oauth.Credentials.find(
valid=True,
interactive=interactive,
prefix=prefix,
suffix=suffix,
client_id=client_id,
client_secret=client_secret,
client_email=client_email,
private_key=private_key,
access_token=access_token,
refresh_token=refresh_token,
identity=identity,
)
if credentials.incomplete:
if interactive:
credentials = authorize(
client_id=credentials.client_id,
client_secret=credentials.client_secret,
save=save,
identity=credentials.identity,
prefix=prefix,
suffix=suffix,
)
elif credentials.type == 2:
credentials = authorize(
client_email=credentials.client_email,
private_key=credentials.private_key,
identity=credentials.identity,
save=save,
)
else:
raise KeyError("Cannot authenticate: enable interactive authorization, pass a token or use a service account.")
accounts = oauth.authenticate(credentials)
scope = navigate(accounts, account=account, webproperty=webproperty, profile=profile)
return scope
def authorize(client_id=None, client_secret=None, client_email=None, private_key=None, save=False, identity=None, prefix=None, suffix=None):
base_credentials = oauth.Credentials.find(
valid=True,
interactive=True,
identity=identity,
client_id=client_id,
client_secret=client_secret,
client_email=client_email,
private_key=private_key,
prefix=prefix,
suffix=suffix,
)
if base_credentials.incomplete:
credentials = oauth.authorize(base_credentials.client_id, base_credentials.client_secret)
credentials.identity = base_credentials.identity
else:
credentials = base_credentials
if save:
keyring.set(credentials.identity, credentials.serialize())
return credentials
def revoke(client_id, client_secret,
client_email=None, private_key=None,
access_token=None, refresh_token=None,
identity=None, prefix=None, suffix=None):
"""
Given a client id, client secret and either an access token or a refresh token,
revoke OAuth access to the Google Analytics data and remove any stored credentials
that use these tokens.
"""
if client_email and private_key:
raise ValueError('Two-legged OAuth does not use revokable tokens.')
credentials = oauth.Credentials.find(
complete=True,
interactive=False,
identity=identity,
client_id=client_id,
client_secret=client_secret,
access_token=access_token,
refresh_token=refresh_token,
prefix=prefix,
suffix=suffix,
)
retval = credentials.revoke()
keyring.delete(credentials.identity)
return retval
|
debrouwere/google-analytics | googleanalytics/commands/query.py | query | python | def query(scope, blueprint, debug, output, with_metadata, realtime, **description):
if realtime:
description['type'] = 'realtime'
if blueprint:
queries = from_blueprint(scope, blueprint)
else:
if not isinstance(scope, ga.account.Profile):
raise ValueError("Account and webproperty needed for query.")
queries = from_args(scope, **description)
for query in queries:
if debug:
click.echo(query.build())
report = query.serialize(format=output, with_metadata=with_metadata)
click.echo(report) | e.g.
googleanalytics --identity debrouwere --account debrouwere --webproperty http://debrouwere.org \
query pageviews \
--start yesterday --limit -10 --sort -pageviews \
--dimensions pagepath \
--debug | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/commands/query.py#L102-L130 | [
"def from_blueprint(scope, src):\n description = yaml.load(src)\n blueprint = ga.Blueprint(description)\n credentials = {}\n credentials.update(blueprint.identity or {})\n credentials.update(blueprint.scope)\n profile = ga.authenticate(interactive=True, save=True, **credentials)\n return bluepr... | # encoding: utf-8
import json
import yaml
import click
import googleanalytics as ga
from googleanalytics import utils
from .common import cli
# TODO: the blueprint stuff can probably be simplified so that
# it's little more than just a call to ga.describe
def from_blueprint(scope, src):
description = yaml.load(src)
blueprint = ga.Blueprint(description)
credentials = {}
credentials.update(blueprint.identity or {})
credentials.update(blueprint.scope)
profile = ga.authenticate(interactive=True, save=True, **credentials)
return blueprint.queries(profile)
# TODO: add any query generation improvements not associated with
# string parsing back into blueprint generation and query.refine
# so they apply across the board
def from_args(scope, metrics,
start, stop, days, limit,
dimensions, filter, segment,
**description):
# LIMIT can be a plain limit or start and length
if limit:
limit = list(map(int, limit.split(',')))
description.update({
'range': {
'start': start,
'stop': stop,
'days': days,
},
'metrics': utils.cut(metrics, ','),
'limit': limit,
})
if dimensions:
description['dimensions'] = utils.cut(dimensions, ',')
query = ga.query.describe(scope, description)
for f in filter:
query = ga.query.refine(query, {'filter': dict(utils.cut(f, '=', ','))})
for s in segment:
query = ga.query.refine(query, {'segment': dict(utils.cut(s, '=', ','))})
return [query]
# TODO: maybe include an --interactive option, which defers
# to `shell` but with a prefilled query?
@cli.command()
@click.argument('metrics')
@click.option('--dimensions')
@click.option('--start',
help='Start date in ISO format, e.g. 2016-01-01.')
@click.option('--stop')
@click.option('--days',
help='Days to count forward from start date, counts backwards when negative.',
default=0,
type=int)
@click.option('--limit',
help='Return only the first <n> or <start>,<n> results.')
@click.option('--sort',
help='Sort by a metric; prefix with - to sort from high to low.')
@click.option('--debug',
is_flag=True)
@click.option('--filter',
multiple=True)
@click.option('--segment',
multiple=True)
@click.option('--precision',
type=click.IntRange(0, 2),
default=1,
help='Increase or decrease query precision.')
@click.option('-i', '--interval',
type=click.Choice(['hour', 'day', 'week', 'month', 'year', 'total']),
default='total',
help='Return hourly, daily etc. numbers.')
@click.option('-o', '--output',
type=click.Choice(['csv', 'json', 'ascii']),
default='ascii',
help='Output format; human-readable ascii table by default.')
@click.option('--with-metadata',
is_flag=True)
@click.option('-b', '--blueprint',
type=click.File('r'))
@click.option('--realtime',
is_flag=True,
help='Use the RealTime API instead of the Core API.')
@click.pass_obj
|
debrouwere/google-analytics | googleanalytics/utils/functional.py | vectorize | python | def vectorize(fn):
@functools.wraps(fn)
def vectorized_method(self, values, *vargs, **kwargs):
wrap = not isinstance(values, (list, tuple))
should_unwrap = not kwargs.setdefault('wrap', False)
unwrap = wrap and should_unwrap
del kwargs['wrap']
if wrap:
values = [values]
results = [fn(self, value, *vargs, **kwargs) for value in values]
if unwrap:
results = results[0]
return results
return vectorized_method | Allows a method to accept one or more values,
but internally deal only with a single item,
and returning a list or a single item depending
on what is desired. | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/utils/functional.py#L20-L45 | null | # encoding: utf-8
import functools
import inspector
class memoize:
def __init__(self, function):
self.function = function
self.memoized = {}
def __call__(self, *args):
try:
return self.memoized[args]
except KeyError:
self.memoized[args] = self.function(*args)
return self.memoized[args]
def vectorize(fn):
"""
Allows a method to accept one or more values,
but internally deal only with a single item,
and returning a list or a single item depending
on what is desired.
"""
@functools.wraps(fn)
def vectorized_method(self, values, *vargs, **kwargs):
wrap = not isinstance(values, (list, tuple))
should_unwrap = not kwargs.setdefault('wrap', False)
unwrap = wrap and should_unwrap
del kwargs['wrap']
if wrap:
values = [values]
results = [fn(self, value, *vargs, **kwargs) for value in values]
if unwrap:
results = results[0]
return results
return vectorized_method
def immutable(method):
@inspector.wraps(method)
def wrapped_method(self, *vargs, **kwargs):
obj = self.clone()
method(obj, *vargs, **kwargs)
return obj
return wrapped_method
def identity(value):
return value
def soak(*vargs, **kwargs):
pass |
debrouwere/google-analytics | googleanalytics/account.py | Account.webproperties | python | def webproperties(self):
raw_properties = self.service.management().webproperties().list(
accountId=self.id).execute()['items']
_webproperties = [WebProperty(raw, self) for raw in raw_properties]
return addressable.List(_webproperties, indices=['id', 'name'], insensitive=True) | A list of all web properties on this account. You may
select a specific web property using its name, its id
or an index.
```python
account.webproperties[0]
account.webproperties['UA-9234823-5']
account.webproperties['debrouwere.org']
``` | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/account.py#L47-L63 | null | class Account(object):
"""
An account is usually but not always associated with a single
website. It will often contain multiple web properties
(different parts of your website that you've configured
Google Analytics to analyze separately, or simply the default
web property that every website has in Google Analytics),
which in turn will have one or more profiles.
You should navigate to a profile to run queries.
```python
import googleanalytics as ga
accounts = ga.authenticate()
profile = accounts['debrouwere.org'].webproperties['UA-12933299-1'].profiles['debrouwere.org']
report = profile.core.query('pageviews').range('2014-10-01', '2014-10-31').get()
print(report['pageviews'])
```
"""
def __init__(self, raw, service, credentials):
self.service = service
self.credentials = credentials
self.raw = raw
self.id = raw['id']
self.name = raw['name']
self.permissions = raw['permissions']['effective']
@property
@utils.memoize
@property
def query(self, *vargs, **kwargs):
""" A shortcut to the first profile of the first webproperty. """
return self.webproperties[0].query(*vargs, **kwargs)
def __repr__(self):
return "<googleanalytics.account.Account object: {} ({})>".format(
self.name, self.id)
|
debrouwere/google-analytics | googleanalytics/account.py | WebProperty.profiles | python | def profiles(self):
raw_profiles = self.account.service.management().profiles().list(
accountId=self.account.id,
webPropertyId=self.id).execute()['items']
profiles = [Profile(raw, self) for raw in raw_profiles]
return addressable.List(profiles, indices=['id', 'name'], insensitive=True) | A list of all profiles on this web property. You may
select a specific profile using its name, its id
or an index.
```python
property.profiles[0]
property.profiles['9234823']
property.profiles['marketing profile']
``` | train | https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/account.py#L98-L114 | null | class WebProperty(object):
"""
A web property is a particular website you're tracking in Google Analytics.
It has one or more profiles, and you will need to pick one from which to
launch your queries.
"""
def __init__(self, raw, account):
self.account = account
self.raw = raw
self.id = raw['id']
self.name = raw['name']
# on rare occassions, e.g. for abandoned web properties,
# a website url might not be present
self.url = raw.get('websiteUrl')
@property
def profile(self):
default = self.raw['defaultProfileId']
return self.profiles[default]
@property
@utils.memoize
def query(self, *vargs, **kwargs):
"""
A shortcut to the first profile of this webproperty.
"""
return self.profiles[0].query(*vargs, **kwargs)
def __repr__(self):
return "<googleanalytics.account.WebProperty object: {} ({})>".format(
self.name, self.id)
|
pyscaffold/configupdater | src/configupdater/configupdater.py | Block.add_before | python | def add_before(self):
idx = self._container.structure.index(self)
return BlockBuilder(self._container, idx) | Returns a builder inserting a new block before the current block | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L115-L118 | null | class Block(ABC):
"""Abstract Block type holding lines
Block objects hold original lines from the configuration file and hold
a reference to a container wherein the object resides.
"""
def __init__(self, container=None, **kwargs):
self._container = container
self.lines = []
self._updated = False
super().__init__(**kwargs)
def __str__(self):
return ''.join(self.lines)
def __len__(self):
return len(self.lines)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.lines == other.lines
else:
return False
def add_line(self, line):
"""Add a line to the current block
Args:
line (str): one line to add
"""
self.lines.append(line)
return self
@property
def container(self):
return self._container
@property
@property
def add_after(self):
"""Returns a builder inserting a new block after the current block"""
idx = self._container.structure.index(self)
return BlockBuilder(self._container, idx+1)
|
pyscaffold/configupdater | src/configupdater/configupdater.py | Block.add_after | python | def add_after(self):
idx = self._container.structure.index(self)
return BlockBuilder(self._container, idx+1) | Returns a builder inserting a new block after the current block | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L121-L124 | null | class Block(ABC):
"""Abstract Block type holding lines
Block objects hold original lines from the configuration file and hold
a reference to a container wherein the object resides.
"""
def __init__(self, container=None, **kwargs):
self._container = container
self.lines = []
self._updated = False
super().__init__(**kwargs)
def __str__(self):
return ''.join(self.lines)
def __len__(self):
return len(self.lines)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.lines == other.lines
else:
return False
def add_line(self, line):
"""Add a line to the current block
Args:
line (str): one line to add
"""
self.lines.append(line)
return self
@property
def container(self):
return self._container
@property
def add_before(self):
"""Returns a builder inserting a new block before the current block"""
idx = self._container.structure.index(self)
return BlockBuilder(self._container, idx)
@property
|
pyscaffold/configupdater | src/configupdater/configupdater.py | BlockBuilder.comment | python | def comment(self, text, comment_prefix='#'):
comment = Comment(self._container)
if not text.startswith(comment_prefix):
text = "{} {}".format(comment_prefix, text)
if not text.endswith('\n'):
text = "{}{}".format(text, '\n')
comment.add_line(text)
self._container.structure.insert(self._idx, comment)
self._idx += 1
return self | Creates a comment block
Args:
text (str): content of comment without #
comment_prefix (str): character indicating start of comment
Returns:
self for chaining | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L133-L151 | [
"def add_line(self, line):\n \"\"\"Add a line to the current block\n\n Args:\n line (str): one line to add\n \"\"\"\n self.lines.append(line)\n return self\n"
] | class BlockBuilder(object):
"""Builder that injects blocks at a given index position."""
def __init__(self, container, idx):
self._container = container
self._idx = idx
def section(self, section):
"""Creates a section block
Args:
section (str or :class:`Section`): name of section or object
Returns:
self for chaining
"""
if not isinstance(self._container, ConfigUpdater):
raise ValueError("Sections can only be added at section level!")
if isinstance(section, str):
# create a new section
section = Section(section, container=self._container)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
if section.name in [block.name for block in self._container
if isinstance(block, Section)]:
raise DuplicateSectionError(section.name)
self._container.structure.insert(self._idx, section)
self._idx += 1
return self
def space(self, newlines=1):
"""Creates a vertical space of newlines
Args:
newlines (int): number of empty lines
Returns:
self for chaining
"""
space = Space()
for line in range(newlines):
space.add_line('\n')
self._container.structure.insert(self._idx, space)
self._idx += 1
return self
def option(self, key, value=None, **kwargs):
"""Creates a new option inside a section
Args:
key (str): key of the option
value (str or None): value of the option
**kwargs: are passed to the constructor of :class:`Option`
Returns:
self for chaining
"""
if not isinstance(self._container, Section):
raise ValueError("Options can only be added inside a section!")
option = Option(key, value, container=self._container, **kwargs)
option.value = value
self._container.structure.insert(self._idx, option)
self._idx += 1
return self
|
pyscaffold/configupdater | src/configupdater/configupdater.py | BlockBuilder.section | python | def section(self, section):
if not isinstance(self._container, ConfigUpdater):
raise ValueError("Sections can only be added at section level!")
if isinstance(section, str):
# create a new section
section = Section(section, container=self._container)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
if section.name in [block.name for block in self._container
if isinstance(block, Section)]:
raise DuplicateSectionError(section.name)
self._container.structure.insert(self._idx, section)
self._idx += 1
return self | Creates a section block
Args:
section (str or :class:`Section`): name of section or object
Returns:
self for chaining | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L153-L174 | null | class BlockBuilder(object):
"""Builder that injects blocks at a given index position."""
def __init__(self, container, idx):
self._container = container
self._idx = idx
def comment(self, text, comment_prefix='#'):
"""Creates a comment block
Args:
text (str): content of comment without #
comment_prefix (str): character indicating start of comment
Returns:
self for chaining
"""
comment = Comment(self._container)
if not text.startswith(comment_prefix):
text = "{} {}".format(comment_prefix, text)
if not text.endswith('\n'):
text = "{}{}".format(text, '\n')
comment.add_line(text)
self._container.structure.insert(self._idx, comment)
self._idx += 1
return self
def space(self, newlines=1):
"""Creates a vertical space of newlines
Args:
newlines (int): number of empty lines
Returns:
self for chaining
"""
space = Space()
for line in range(newlines):
space.add_line('\n')
self._container.structure.insert(self._idx, space)
self._idx += 1
return self
def option(self, key, value=None, **kwargs):
"""Creates a new option inside a section
Args:
key (str): key of the option
value (str or None): value of the option
**kwargs: are passed to the constructor of :class:`Option`
Returns:
self for chaining
"""
if not isinstance(self._container, Section):
raise ValueError("Options can only be added inside a section!")
option = Option(key, value, container=self._container, **kwargs)
option.value = value
self._container.structure.insert(self._idx, option)
self._idx += 1
return self
|
pyscaffold/configupdater | src/configupdater/configupdater.py | BlockBuilder.space | python | def space(self, newlines=1):
space = Space()
for line in range(newlines):
space.add_line('\n')
self._container.structure.insert(self._idx, space)
self._idx += 1
return self | Creates a vertical space of newlines
Args:
newlines (int): number of empty lines
Returns:
self for chaining | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L176-L190 | [
"def add_line(self, line):\n \"\"\"Add a line to the current block\n\n Args:\n line (str): one line to add\n \"\"\"\n self.lines.append(line)\n return self\n"
] | class BlockBuilder(object):
"""Builder that injects blocks at a given index position."""
def __init__(self, container, idx):
self._container = container
self._idx = idx
def comment(self, text, comment_prefix='#'):
"""Creates a comment block
Args:
text (str): content of comment without #
comment_prefix (str): character indicating start of comment
Returns:
self for chaining
"""
comment = Comment(self._container)
if not text.startswith(comment_prefix):
text = "{} {}".format(comment_prefix, text)
if not text.endswith('\n'):
text = "{}{}".format(text, '\n')
comment.add_line(text)
self._container.structure.insert(self._idx, comment)
self._idx += 1
return self
def section(self, section):
"""Creates a section block
Args:
section (str or :class:`Section`): name of section or object
Returns:
self for chaining
"""
if not isinstance(self._container, ConfigUpdater):
raise ValueError("Sections can only be added at section level!")
if isinstance(section, str):
# create a new section
section = Section(section, container=self._container)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
if section.name in [block.name for block in self._container
if isinstance(block, Section)]:
raise DuplicateSectionError(section.name)
self._container.structure.insert(self._idx, section)
self._idx += 1
return self
def option(self, key, value=None, **kwargs):
"""Creates a new option inside a section
Args:
key (str): key of the option
value (str or None): value of the option
**kwargs: are passed to the constructor of :class:`Option`
Returns:
self for chaining
"""
if not isinstance(self._container, Section):
raise ValueError("Options can only be added inside a section!")
option = Option(key, value, container=self._container, **kwargs)
option.value = value
self._container.structure.insert(self._idx, option)
self._idx += 1
return self
|
pyscaffold/configupdater | src/configupdater/configupdater.py | BlockBuilder.option | python | def option(self, key, value=None, **kwargs):
if not isinstance(self._container, Section):
raise ValueError("Options can only be added inside a section!")
option = Option(key, value, container=self._container, **kwargs)
option.value = value
self._container.structure.insert(self._idx, option)
self._idx += 1
return self | Creates a new option inside a section
Args:
key (str): key of the option
value (str or None): value of the option
**kwargs: are passed to the constructor of :class:`Option`
Returns:
self for chaining | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L192-L209 | null | class BlockBuilder(object):
"""Builder that injects blocks at a given index position."""
def __init__(self, container, idx):
self._container = container
self._idx = idx
def comment(self, text, comment_prefix='#'):
"""Creates a comment block
Args:
text (str): content of comment without #
comment_prefix (str): character indicating start of comment
Returns:
self for chaining
"""
comment = Comment(self._container)
if not text.startswith(comment_prefix):
text = "{} {}".format(comment_prefix, text)
if not text.endswith('\n'):
text = "{}{}".format(text, '\n')
comment.add_line(text)
self._container.structure.insert(self._idx, comment)
self._idx += 1
return self
def section(self, section):
"""Creates a section block
Args:
section (str or :class:`Section`): name of section or object
Returns:
self for chaining
"""
if not isinstance(self._container, ConfigUpdater):
raise ValueError("Sections can only be added at section level!")
if isinstance(section, str):
# create a new section
section = Section(section, container=self._container)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
if section.name in [block.name for block in self._container
if isinstance(block, Section)]:
raise DuplicateSectionError(section.name)
self._container.structure.insert(self._idx, section)
self._idx += 1
return self
def space(self, newlines=1):
"""Creates a vertical space of newlines
Args:
newlines (int): number of empty lines
Returns:
self for chaining
"""
space = Space()
for line in range(newlines):
space.add_line('\n')
self._container.structure.insert(self._idx, space)
self._idx += 1
return self
|
pyscaffold/configupdater | src/configupdater/configupdater.py | Section.add_comment | python | def add_comment(self, line):
if not isinstance(self.last_item, Comment):
comment = Comment(self._structure)
self._structure.append(comment)
self.last_item.add_line(line)
return self | Add a Comment object to the section
Used during initial parsing mainly
Args:
line (str): one line in the comment | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L254-L266 | null | class Section(Block, Container, MutableMapping):
"""Section block holding options
Attributes:
name (str): name of the section
updated (bool): indicates name change or a new section
"""
def __init__(self, name, container, **kwargs):
self._name = name
self._structure = list()
self._updated = False
super().__init__(container=container, **kwargs)
def add_option(self, entry):
"""Add an Option object to the section
Used during initial parsing mainly
Args:
entry (Option): key value pair as Option object
"""
self._structure.append(entry)
return self
def add_space(self, line):
"""Add a Space object to the section
Used during initial parsing mainly
Args:
line (str): one line that defines the space, maybe whitespaces
"""
if not isinstance(self.last_item, Space):
space = Space(self._structure)
self._structure.append(space)
self.last_item.add_line(line)
return self
def _get_option_idx(self, key):
idx = [i for i, entry in enumerate(self._structure)
if isinstance(entry, Option) and entry.key == key]
if idx:
return idx[0]
else:
raise ValueError
def __str__(self):
if not self.updated:
s = super().__str__()
else:
s = "[{}]\n".format(self._name)
for entry in self._structure:
s += str(entry)
return s
def __repr__(self):
return '<Section: {}>'.format(self.name)
def __getitem__(self, key):
if key not in self.options():
raise KeyError(key)
return self._structure[self._get_option_idx(key=key)]
def __setitem__(self, key, value):
if key in self:
option = self.__getitem__(key)
option.value = value
else:
option = Option(key, value, container=self)
option.value = value
self._structure.append(option)
def __delitem__(self, key):
if key not in self.options():
raise KeyError(key)
idx = self._get_option_idx(key=key)
del self._structure[idx]
def __contains__(self, key):
return key in self.options()
def __len__(self):
return len(self._structure)
def __iter__(self):
"""Return all entries, not just options"""
return self._structure.__iter__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.name == other.name and
self._structure == other._structure)
else:
return False
def option_blocks(self):
"""Returns option blocks
Returns:
list: list of :class:`Option` blocks
"""
return [entry for entry in self._structure
if isinstance(entry, Option)]
def options(self):
"""Returns option names
Returns:
list: list of option names as strings
"""
return [option.key for option in self.option_blocks()]
def to_dict(self):
"""Transform to dictionary
Returns:
dict: dictionary with same content
"""
return {key: self.__getitem__(key).value for key in self.options()}
@property
def updated(self):
"""Returns if the option was changed/updated"""
# if no lines were added, treat it as updated since we added it
return self._updated or not self.lines
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = str(value)
self._updated = True
def set(self, option, value=None):
"""Set an option for chaining.
Args:
option (str): option name
value (str): value, default None
"""
option = self._container.optionxform(option)
if option in self.options():
self.__getitem__(option).value = value
else:
self.__setitem__(option, value)
return self
def insert_at(self, idx):
"""Returns a builder inserting a new block at the given index
Args:
idx (int): index where to insert
"""
return BlockBuilder(self, idx)
|
pyscaffold/configupdater | src/configupdater/configupdater.py | Section.add_space | python | def add_space(self, line):
if not isinstance(self.last_item, Space):
space = Space(self._structure)
self._structure.append(space)
self.last_item.add_line(line)
return self | Add a Space object to the section
Used during initial parsing mainly
Args:
line (str): one line that defines the space, maybe whitespaces | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L268-L280 | null | class Section(Block, Container, MutableMapping):
"""Section block holding options
Attributes:
name (str): name of the section
updated (bool): indicates name change or a new section
"""
def __init__(self, name, container, **kwargs):
self._name = name
self._structure = list()
self._updated = False
super().__init__(container=container, **kwargs)
def add_option(self, entry):
"""Add an Option object to the section
Used during initial parsing mainly
Args:
entry (Option): key value pair as Option object
"""
self._structure.append(entry)
return self
def add_comment(self, line):
"""Add a Comment object to the section
Used during initial parsing mainly
Args:
line (str): one line in the comment
"""
if not isinstance(self.last_item, Comment):
comment = Comment(self._structure)
self._structure.append(comment)
self.last_item.add_line(line)
return self
def _get_option_idx(self, key):
idx = [i for i, entry in enumerate(self._structure)
if isinstance(entry, Option) and entry.key == key]
if idx:
return idx[0]
else:
raise ValueError
def __str__(self):
if not self.updated:
s = super().__str__()
else:
s = "[{}]\n".format(self._name)
for entry in self._structure:
s += str(entry)
return s
def __repr__(self):
return '<Section: {}>'.format(self.name)
def __getitem__(self, key):
if key not in self.options():
raise KeyError(key)
return self._structure[self._get_option_idx(key=key)]
def __setitem__(self, key, value):
if key in self:
option = self.__getitem__(key)
option.value = value
else:
option = Option(key, value, container=self)
option.value = value
self._structure.append(option)
def __delitem__(self, key):
if key not in self.options():
raise KeyError(key)
idx = self._get_option_idx(key=key)
del self._structure[idx]
def __contains__(self, key):
return key in self.options()
def __len__(self):
return len(self._structure)
def __iter__(self):
"""Return all entries, not just options"""
return self._structure.__iter__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.name == other.name and
self._structure == other._structure)
else:
return False
def option_blocks(self):
"""Returns option blocks
Returns:
list: list of :class:`Option` blocks
"""
return [entry for entry in self._structure
if isinstance(entry, Option)]
def options(self):
"""Returns option names
Returns:
list: list of option names as strings
"""
return [option.key for option in self.option_blocks()]
def to_dict(self):
"""Transform to dictionary
Returns:
dict: dictionary with same content
"""
return {key: self.__getitem__(key).value for key in self.options()}
@property
def updated(self):
"""Returns if the option was changed/updated"""
# if no lines were added, treat it as updated since we added it
return self._updated or not self.lines
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = str(value)
self._updated = True
def set(self, option, value=None):
"""Set an option for chaining.
Args:
option (str): option name
value (str): value, default None
"""
option = self._container.optionxform(option)
if option in self.options():
self.__getitem__(option).value = value
else:
self.__setitem__(option, value)
return self
def insert_at(self, idx):
"""Returns a builder inserting a new block at the given index
Args:
idx (int): index where to insert
"""
return BlockBuilder(self, idx)
|
pyscaffold/configupdater | src/configupdater/configupdater.py | Section.to_dict | python | def to_dict(self):
return {key: self.__getitem__(key).value for key in self.options()} | Transform to dictionary
Returns:
dict: dictionary with same content | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L356-L362 | [
"def options(self):\n \"\"\"Returns option names\n\n Returns:\n list: list of option names as strings\n \"\"\"\n return [option.key for option in self.option_blocks()]\n"
] | class Section(Block, Container, MutableMapping):
"""Section block holding options
Attributes:
name (str): name of the section
updated (bool): indicates name change or a new section
"""
def __init__(self, name, container, **kwargs):
self._name = name
self._structure = list()
self._updated = False
super().__init__(container=container, **kwargs)
def add_option(self, entry):
"""Add an Option object to the section
Used during initial parsing mainly
Args:
entry (Option): key value pair as Option object
"""
self._structure.append(entry)
return self
def add_comment(self, line):
"""Add a Comment object to the section
Used during initial parsing mainly
Args:
line (str): one line in the comment
"""
if not isinstance(self.last_item, Comment):
comment = Comment(self._structure)
self._structure.append(comment)
self.last_item.add_line(line)
return self
def add_space(self, line):
"""Add a Space object to the section
Used during initial parsing mainly
Args:
line (str): one line that defines the space, maybe whitespaces
"""
if not isinstance(self.last_item, Space):
space = Space(self._structure)
self._structure.append(space)
self.last_item.add_line(line)
return self
def _get_option_idx(self, key):
idx = [i for i, entry in enumerate(self._structure)
if isinstance(entry, Option) and entry.key == key]
if idx:
return idx[0]
else:
raise ValueError
def __str__(self):
if not self.updated:
s = super().__str__()
else:
s = "[{}]\n".format(self._name)
for entry in self._structure:
s += str(entry)
return s
def __repr__(self):
return '<Section: {}>'.format(self.name)
def __getitem__(self, key):
if key not in self.options():
raise KeyError(key)
return self._structure[self._get_option_idx(key=key)]
def __setitem__(self, key, value):
if key in self:
option = self.__getitem__(key)
option.value = value
else:
option = Option(key, value, container=self)
option.value = value
self._structure.append(option)
def __delitem__(self, key):
if key not in self.options():
raise KeyError(key)
idx = self._get_option_idx(key=key)
del self._structure[idx]
def __contains__(self, key):
return key in self.options()
def __len__(self):
return len(self._structure)
def __iter__(self):
"""Return all entries, not just options"""
return self._structure.__iter__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.name == other.name and
self._structure == other._structure)
else:
return False
def option_blocks(self):
"""Returns option blocks
Returns:
list: list of :class:`Option` blocks
"""
return [entry for entry in self._structure
if isinstance(entry, Option)]
def options(self):
"""Returns option names
Returns:
list: list of option names as strings
"""
return [option.key for option in self.option_blocks()]
@property
def updated(self):
"""Returns if the option was changed/updated"""
# if no lines were added, treat it as updated since we added it
return self._updated or not self.lines
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = str(value)
self._updated = True
def set(self, option, value=None):
"""Set an option for chaining.
Args:
option (str): option name
value (str): value, default None
"""
option = self._container.optionxform(option)
if option in self.options():
self.__getitem__(option).value = value
else:
self.__setitem__(option, value)
return self
def insert_at(self, idx):
"""Returns a builder inserting a new block at the given index
Args:
idx (int): index where to insert
"""
return BlockBuilder(self, idx)
|
pyscaffold/configupdater | src/configupdater/configupdater.py | Section.set | python | def set(self, option, value=None):
option = self._container.optionxform(option)
if option in self.options():
self.__getitem__(option).value = value
else:
self.__setitem__(option, value)
return self | Set an option for chaining.
Args:
option (str): option name
value (str): value, default None | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L379-L391 | [
"def options(self):\n \"\"\"Returns option names\n\n Returns:\n list: list of option names as strings\n \"\"\"\n return [option.key for option in self.option_blocks()]\n"
] | class Section(Block, Container, MutableMapping):
"""Section block holding options
Attributes:
name (str): name of the section
updated (bool): indicates name change or a new section
"""
def __init__(self, name, container, **kwargs):
self._name = name
self._structure = list()
self._updated = False
super().__init__(container=container, **kwargs)
def add_option(self, entry):
"""Add an Option object to the section
Used during initial parsing mainly
Args:
entry (Option): key value pair as Option object
"""
self._structure.append(entry)
return self
def add_comment(self, line):
"""Add a Comment object to the section
Used during initial parsing mainly
Args:
line (str): one line in the comment
"""
if not isinstance(self.last_item, Comment):
comment = Comment(self._structure)
self._structure.append(comment)
self.last_item.add_line(line)
return self
def add_space(self, line):
"""Add a Space object to the section
Used during initial parsing mainly
Args:
line (str): one line that defines the space, maybe whitespaces
"""
if not isinstance(self.last_item, Space):
space = Space(self._structure)
self._structure.append(space)
self.last_item.add_line(line)
return self
def _get_option_idx(self, key):
idx = [i for i, entry in enumerate(self._structure)
if isinstance(entry, Option) and entry.key == key]
if idx:
return idx[0]
else:
raise ValueError
def __str__(self):
if not self.updated:
s = super().__str__()
else:
s = "[{}]\n".format(self._name)
for entry in self._structure:
s += str(entry)
return s
def __repr__(self):
return '<Section: {}>'.format(self.name)
def __getitem__(self, key):
if key not in self.options():
raise KeyError(key)
return self._structure[self._get_option_idx(key=key)]
def __setitem__(self, key, value):
if key in self:
option = self.__getitem__(key)
option.value = value
else:
option = Option(key, value, container=self)
option.value = value
self._structure.append(option)
def __delitem__(self, key):
if key not in self.options():
raise KeyError(key)
idx = self._get_option_idx(key=key)
del self._structure[idx]
def __contains__(self, key):
return key in self.options()
def __len__(self):
return len(self._structure)
def __iter__(self):
"""Return all entries, not just options"""
return self._structure.__iter__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.name == other.name and
self._structure == other._structure)
else:
return False
def option_blocks(self):
"""Returns option blocks
Returns:
list: list of :class:`Option` blocks
"""
return [entry for entry in self._structure
if isinstance(entry, Option)]
def options(self):
"""Returns option names
Returns:
list: list of option names as strings
"""
return [option.key for option in self.option_blocks()]
def to_dict(self):
"""Transform to dictionary
Returns:
dict: dictionary with same content
"""
return {key: self.__getitem__(key).value for key in self.options()}
@property
def updated(self):
"""Returns if the option was changed/updated"""
# if no lines were added, treat it as updated since we added it
return self._updated or not self.lines
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = str(value)
self._updated = True
def insert_at(self, idx):
"""Returns a builder inserting a new block at the given index
Args:
idx (int): index where to insert
"""
return BlockBuilder(self, idx)
|
pyscaffold/configupdater | src/configupdater/configupdater.py | Option.set_values | python | def set_values(self, values, separator='\n', indent=4*' '):
self._updated = True
self._multiline_value_joined = True
self._values = values
if separator == '\n':
values.insert(0, '')
separator = separator + indent
self._value = separator.join(values) | Sets the value to a given list of options, e.g. multi-line values
Args:
values (list): list of values
separator (str): separator for values, default: line separator
indent (str): indentation depth in case of line separator | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L478-L492 | null | class Option(Block):
"""Option block holding a key/value pair.
Attributes:
key (str): name of the key
value (str): stored value
updated (bool): indicates name change or a new section
"""
def __init__(self, key, value, container, delimiter='=',
space_around_delimiters=True, line=None):
super().__init__(container=container)
self._key = key
self._values = [value]
self._value_is_none = value is None
self._delimiter = delimiter
self._value = None # will be filled after join_multiline_value
self._updated = False
self._multiline_value_joined = False
self._space_around_delimiters = space_around_delimiters
if line:
self.lines.append(line)
def add_line(self, line):
super().add_line(line)
self._values.append(line.strip())
def _join_multiline_value(self):
if not self._multiline_value_joined and not self._value_is_none:
# do what `_join_multiline_value` in ConfigParser would do
self._value = '\n'.join(self._values).rstrip()
self._multiline_value_joined = True
def __str__(self):
if not self.updated:
return super().__str__()
if self._value is None:
return "{}{}".format(self._key, '\n')
if self._space_around_delimiters:
# no space is needed if we use multi-line arguments
suffix = '' if str(self._value).startswith('\n') else ' '
delim = " {}{}".format(self._delimiter, suffix)
else:
delim = self._delimiter
return "{}{}{}{}".format(self._key, delim, self._value, '\n')
def __repr__(self):
return '<Option: {} = {}>'.format(self.key, self.value)
@property
def updated(self):
"""Returns if the option was changed/updated"""
# if no lines were added, treat it as updated since we added it
return self._updated or not self.lines
@property
def key(self):
return self._key
@key.setter
def key(self, value):
self._join_multiline_value()
self._key = value
self._updated = True
@property
def value(self):
self._join_multiline_value()
return self._value
@value.setter
def value(self, value):
self._updated = True
self._multiline_value_joined = True
self._value = value
self._values = [value]
|
pyscaffold/configupdater | src/configupdater/configupdater.py | ConfigUpdater.read | python | def read(self, filename, encoding=None):
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
self._filename = os.path.abspath(filename) | Read and parse a filename.
Args:
filename (str): path to file
encoding (str): encoding of file, default None | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L597-L606 | [
"def _read(self, fp, fpname):\n \"\"\"Parse a sectioned configuration file.\n\n Each section in a configuration file contains a header, indicated by\n a name in square brackets (`[]`), plus key/value options, indicated by\n `name` and `value` delimited with a specific substring (`=` or `:` by\n defau... | class ConfigUpdater(Container, MutableMapping):
"""Parser for updating configuration files.
ConfigUpdater follows the API of ConfigParser with some differences:
* inline comments are treated as part of a key's value,
* only a single config file can be updated at a time,
* empty lines in values are not valid,
* the original case of sections and keys are kept,
* control over the position of a new section/key.
Following features are **deliberately not** implemented:
* interpolation of values,
* propagation of parameters from the default section,
* conversions of values,
* passing key/value-pairs with ``default`` argument,
* non-strict mode allowing duplicate sections and keys.
"""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
def __init__(self, allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, space_around_delimiters=True):
"""Constructor of ConfigUpdater
Args:
allow_no_value (bool): allow keys without a value, default False
delimiters (tuple): delimiters for key/value pairs, default =, :
comment_prefixes (tuple): prefix of comments, default # and ;
inline_comment_prefixes (tuple): prefix of inline comment,
default None
strict (bool): each section must be unique as well as every key
within a section, default True
space_around_delimiters (bool): add a space before and after the
delimiter, default True
"""
self._filename = None
self._space_around_delimiters = space_around_delimiters
self._dict = _default_dict # no reason to let the user change this
# keeping _sections to keep code aligned with ConfigParser but
# _structure takes the actual role instead. Only use self._structure!
self._sections = self._dict()
self._structure = []
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
# Options from ConfigParser that we need to set constantly
self._empty_lines_in_values = False
super().__init__()
def _get_section_idx(self, name):
idx = [i for i, entry in enumerate(self._structure)
if isinstance(entry, Section) and entry.name == name]
if idx:
return idx[0]
else:
raise ValueError
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The ``f`` argument must be iterable, returning one line at a time.
Optional second argument is the ``source`` specifying the name of the
file being read. If not given, it is taken from f.name. If ``f`` has no
``name`` attribute, ``<???>`` is used.
Args:
f: file like object
source (str): reference name for file object, default None
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string.
Args:
string (str): string containing a configuration
source (str): reference name for file object, default '<string>'
"""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def optionxform(self, optionstr):
"""Converts an option key to lower case for unification
Args:
optionstr (str): key name
Returns:
str: unified option name
"""
return optionstr.lower()
def _update_curr_block(self, block_type):
if not isinstance(self.last_item, block_type):
new_block = block_type(container=self)
self._structure.append(new_block)
def _add_comment(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_comment(line)
else:
self._update_curr_block(Comment)
self.last_item.add_line(line)
def _add_section(self, sectname, line):
new_section = Section(sectname, container=self)
new_section.add_line(line)
self._structure.append(new_section)
def _add_option(self, key, vi, value, line):
entry = Option(
key, value,
delimiter=vi,
container=self.last_item,
space_around_delimiters=self._space_around_delimiters,
line=line)
self.last_item.add_option(entry)
def _add_space(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_space(line)
else:
self._update_curr_block(Space)
self.last_item.add_line(line)
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]`), plus key/value options, indicated by
`name` and `value` delimited with a specific substring (`=` or `:` by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#` and `;` by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
Note: This method was borrowed from ConfigParser and we keep this
mess here as close as possible to the original messod (pardon
this german pun) for consistency reasons and later upgrades.
"""
self._structure = []
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
self._add_comment(line) # HOOK
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
self.last_item.last_item.add_line(line) # HOOK
else:
# empty line marks end of value
indent_level = sys.maxsize
if comment_start is None:
self._add_space(line)
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
self.last_item.last_item.add_line(line) # HOOK
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
else:
cursect = self._dict()
self._sections[sectname] = cursect
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
self._add_section(sectname, line) # HOOK
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
self._add_option(optname, vi, optval, line) # HOOK
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def write(self, fp):
"""Write an .ini-format representation of the configuration state.
Args:
fp (file-like object): open file handle
"""
fp.write(str(self))
def update_file(self):
"""Update the read-in configuration file.
"""
if self._filename is None:
raise NoConfigFileReadError()
with open(self._filename, 'w') as fb:
self.write(fb)
def validate_format(self, **kwargs):
"""Call ConfigParser to validate config
Args:
kwargs: are passed to :class:`configparser.ConfigParser`
"""
args = dict(
dict_type=self._dict,
allow_no_value=self._allow_no_value,
inline_comment_prefixes=self._inline_comment_prefixes,
strict=self._strict,
empty_lines_in_values=self._empty_lines_in_values
)
args.update(kwargs)
parser = ConfigParser(**args)
updated_cfg = str(self)
parser.read_string(updated_cfg)
def sections_blocks(self):
"""Returns all section blocks
Returns:
list: list of :class:`Section` blocks
"""
return [block for block in self._structure
if isinstance(block, Section)]
def sections(self):
"""Return a list of section names
Returns:
list: list of section names
"""
return [section.name for section in self.sections_blocks()]
def __str__(self):
return ''.join(str(block) for block in self._structure)
def __getitem__(self, key):
for section in self.sections_blocks():
if section.name == key:
return section
else:
raise KeyError(key)
def __setitem__(self, key, value):
if not isinstance(value, Section):
raise ValueError("Value must be of type Section!")
if isinstance(key, str) and key in self:
idx = self._get_section_idx(key)
del self._structure[idx]
self._structure.insert(idx, value)
else:
# name the section by the key
value.name = key
self.add_section(value)
def __delitem__(self, section):
if not self.has_section(section):
raise KeyError(section)
self.remove_section(section)
def __contains__(self, key):
return self.has_section(key)
def __len__(self):
"""Number of all blocks, not just sections"""
return len(self._structure)
def __iter__(self):
"""Iterate over all blocks, not just sections"""
return self._structure.__iter__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._structure == other._structure
else:
return False
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
Args:
section (str or :class:`Section`): name or Section type
"""
if section in self.sections():
raise DuplicateSectionError(section)
if isinstance(section, str):
# create a new section
section = Section(section, container=self)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
self._structure.append(section)
def has_section(self, section):
"""Returns whether the given section exists.
Args:
section (str): name of section
Returns:
bool: wether the section exists
"""
return section in self.sections()
def options(self, section):
"""Returns list of configuration options for the named section.
Args:
section (str): name of section
Returns:
list: list of option names
"""
if not self.has_section(section):
raise NoSectionError(section) from None
return self.__getitem__(section).options()
def get(self, section, option):
"""Gets an option value for a given section.
Args:
section (str): section name
option (str): option name
Returns:
:class:`Option`: Option object holding key/value pair
"""
if not self.has_section(section):
raise NoSectionError(section) from None
section = self.__getitem__(section)
option = self.optionxform(option)
try:
value = section[option]
except KeyError:
raise NoOptionError(option, section)
return value
def items(self, section=_UNSET):
"""Return a list of (name, value) tuples for options or sections.
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_type) for each section.
Args:
section (str): optional section name, default UNSET
Returns:
list: list of :class:`Section` or :class:`Option` objects
"""
if section is _UNSET:
return [(sect.name, sect) for sect in self.sections_blocks()]
section = self.__getitem__(section)
return [(opt.key, opt) for opt in section.option_blocks()]
def has_option(self, section, option):
"""Checks for the existence of a given option in a given section.
Args:
section (str): name of section
option (str): name of option
Returns:
bool: whether the option exists in the given section
"""
if section not in self.sections():
return False
else:
option = self.optionxform(option)
return option in self[section]
def set(self, section, option, value=None):
"""Set an option.
Args:
section (str): section name
option (str): option name
value (str): value, default None
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
if option in section:
section[option].value = value
else:
section[option] = value
return self
def remove_option(self, section, option):
"""Remove an option.
Args:
section (str): section name
option (str): option name
Returns:
bool: whether the option was actually removed
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
existed = option in section.options()
if existed:
del section[option]
return existed
def remove_section(self, name):
"""Remove a file section.
Args:
name: name of the section
Returns:
bool: whether the section was actually removed
"""
existed = self.has_section(name)
if existed:
idx = self._get_section_idx(name)
del self._structure[idx]
return existed
def to_dict(self):
"""Transform to dictionary
Returns:
dict: dictionary with same content
"""
return {sect: self.__getitem__(sect).to_dict()
for sect in self.sections()}
|
pyscaffold/configupdater | src/configupdater/configupdater.py | ConfigUpdater.update_file | python | def update_file(self):
if self._filename is None:
raise NoConfigFileReadError()
with open(self._filename, 'w') as fb:
self.write(fb) | Update the read-in configuration file. | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L825-L831 | [
"def write(self, fp):\n \"\"\"Write an .ini-format representation of the configuration state.\n\n Args:\n fp (file-like object): open file handle\n \"\"\"\n fp.write(str(self))\n"
] | class ConfigUpdater(Container, MutableMapping):
"""Parser for updating configuration files.
ConfigUpdater follows the API of ConfigParser with some differences:
* inline comments are treated as part of a key's value,
* only a single config file can be updated at a time,
* empty lines in values are not valid,
* the original case of sections and keys are kept,
* control over the position of a new section/key.
Following features are **deliberately not** implemented:
* interpolation of values,
* propagation of parameters from the default section,
* conversions of values,
* passing key/value-pairs with ``default`` argument,
* non-strict mode allowing duplicate sections and keys.
"""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
def __init__(self, allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, space_around_delimiters=True):
"""Constructor of ConfigUpdater
Args:
allow_no_value (bool): allow keys without a value, default False
delimiters (tuple): delimiters for key/value pairs, default =, :
comment_prefixes (tuple): prefix of comments, default # and ;
inline_comment_prefixes (tuple): prefix of inline comment,
default None
strict (bool): each section must be unique as well as every key
within a section, default True
space_around_delimiters (bool): add a space before and after the
delimiter, default True
"""
self._filename = None
self._space_around_delimiters = space_around_delimiters
self._dict = _default_dict # no reason to let the user change this
# keeping _sections to keep code aligned with ConfigParser but
# _structure takes the actual role instead. Only use self._structure!
self._sections = self._dict()
self._structure = []
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
# Options from ConfigParser that we need to set constantly
self._empty_lines_in_values = False
super().__init__()
def _get_section_idx(self, name):
idx = [i for i, entry in enumerate(self._structure)
if isinstance(entry, Section) and entry.name == name]
if idx:
return idx[0]
else:
raise ValueError
def read(self, filename, encoding=None):
"""Read and parse a filename.
Args:
filename (str): path to file
encoding (str): encoding of file, default None
"""
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
self._filename = os.path.abspath(filename)
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The ``f`` argument must be iterable, returning one line at a time.
Optional second argument is the ``source`` specifying the name of the
file being read. If not given, it is taken from f.name. If ``f`` has no
``name`` attribute, ``<???>`` is used.
Args:
f: file like object
source (str): reference name for file object, default None
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string.
Args:
string (str): string containing a configuration
source (str): reference name for file object, default '<string>'
"""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def optionxform(self, optionstr):
"""Converts an option key to lower case for unification
Args:
optionstr (str): key name
Returns:
str: unified option name
"""
return optionstr.lower()
def _update_curr_block(self, block_type):
if not isinstance(self.last_item, block_type):
new_block = block_type(container=self)
self._structure.append(new_block)
def _add_comment(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_comment(line)
else:
self._update_curr_block(Comment)
self.last_item.add_line(line)
def _add_section(self, sectname, line):
new_section = Section(sectname, container=self)
new_section.add_line(line)
self._structure.append(new_section)
def _add_option(self, key, vi, value, line):
entry = Option(
key, value,
delimiter=vi,
container=self.last_item,
space_around_delimiters=self._space_around_delimiters,
line=line)
self.last_item.add_option(entry)
def _add_space(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_space(line)
else:
self._update_curr_block(Space)
self.last_item.add_line(line)
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]`), plus key/value options, indicated by
`name` and `value` delimited with a specific substring (`=` or `:` by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#` and `;` by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
Note: This method was borrowed from ConfigParser and we keep this
mess here as close as possible to the original messod (pardon
this german pun) for consistency reasons and later upgrades.
"""
self._structure = []
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
self._add_comment(line) # HOOK
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
self.last_item.last_item.add_line(line) # HOOK
else:
# empty line marks end of value
indent_level = sys.maxsize
if comment_start is None:
self._add_space(line)
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
self.last_item.last_item.add_line(line) # HOOK
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
else:
cursect = self._dict()
self._sections[sectname] = cursect
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
self._add_section(sectname, line) # HOOK
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
self._add_option(optname, vi, optval, line) # HOOK
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def write(self, fp):
"""Write an .ini-format representation of the configuration state.
Args:
fp (file-like object): open file handle
"""
fp.write(str(self))
def validate_format(self, **kwargs):
"""Call ConfigParser to validate config
Args:
kwargs: are passed to :class:`configparser.ConfigParser`
"""
args = dict(
dict_type=self._dict,
allow_no_value=self._allow_no_value,
inline_comment_prefixes=self._inline_comment_prefixes,
strict=self._strict,
empty_lines_in_values=self._empty_lines_in_values
)
args.update(kwargs)
parser = ConfigParser(**args)
updated_cfg = str(self)
parser.read_string(updated_cfg)
def sections_blocks(self):
"""Returns all section blocks
Returns:
list: list of :class:`Section` blocks
"""
return [block for block in self._structure
if isinstance(block, Section)]
def sections(self):
"""Return a list of section names
Returns:
list: list of section names
"""
return [section.name for section in self.sections_blocks()]
def __str__(self):
return ''.join(str(block) for block in self._structure)
def __getitem__(self, key):
for section in self.sections_blocks():
if section.name == key:
return section
else:
raise KeyError(key)
def __setitem__(self, key, value):
if not isinstance(value, Section):
raise ValueError("Value must be of type Section!")
if isinstance(key, str) and key in self:
idx = self._get_section_idx(key)
del self._structure[idx]
self._structure.insert(idx, value)
else:
# name the section by the key
value.name = key
self.add_section(value)
def __delitem__(self, section):
if not self.has_section(section):
raise KeyError(section)
self.remove_section(section)
def __contains__(self, key):
return self.has_section(key)
def __len__(self):
"""Number of all blocks, not just sections"""
return len(self._structure)
def __iter__(self):
"""Iterate over all blocks, not just sections"""
return self._structure.__iter__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._structure == other._structure
else:
return False
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
Args:
section (str or :class:`Section`): name or Section type
"""
if section in self.sections():
raise DuplicateSectionError(section)
if isinstance(section, str):
# create a new section
section = Section(section, container=self)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
self._structure.append(section)
def has_section(self, section):
"""Returns whether the given section exists.
Args:
section (str): name of section
Returns:
bool: wether the section exists
"""
return section in self.sections()
def options(self, section):
"""Returns list of configuration options for the named section.
Args:
section (str): name of section
Returns:
list: list of option names
"""
if not self.has_section(section):
raise NoSectionError(section) from None
return self.__getitem__(section).options()
def get(self, section, option):
"""Gets an option value for a given section.
Args:
section (str): section name
option (str): option name
Returns:
:class:`Option`: Option object holding key/value pair
"""
if not self.has_section(section):
raise NoSectionError(section) from None
section = self.__getitem__(section)
option = self.optionxform(option)
try:
value = section[option]
except KeyError:
raise NoOptionError(option, section)
return value
def items(self, section=_UNSET):
"""Return a list of (name, value) tuples for options or sections.
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_type) for each section.
Args:
section (str): optional section name, default UNSET
Returns:
list: list of :class:`Section` or :class:`Option` objects
"""
if section is _UNSET:
return [(sect.name, sect) for sect in self.sections_blocks()]
section = self.__getitem__(section)
return [(opt.key, opt) for opt in section.option_blocks()]
def has_option(self, section, option):
"""Checks for the existence of a given option in a given section.
Args:
section (str): name of section
option (str): name of option
Returns:
bool: whether the option exists in the given section
"""
if section not in self.sections():
return False
else:
option = self.optionxform(option)
return option in self[section]
def set(self, section, option, value=None):
"""Set an option.
Args:
section (str): section name
option (str): option name
value (str): value, default None
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
if option in section:
section[option].value = value
else:
section[option] = value
return self
def remove_option(self, section, option):
"""Remove an option.
Args:
section (str): section name
option (str): option name
Returns:
bool: whether the option was actually removed
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
existed = option in section.options()
if existed:
del section[option]
return existed
def remove_section(self, name):
"""Remove a file section.
Args:
name: name of the section
Returns:
bool: whether the section was actually removed
"""
existed = self.has_section(name)
if existed:
idx = self._get_section_idx(name)
del self._structure[idx]
return existed
def to_dict(self):
"""Transform to dictionary
Returns:
dict: dictionary with same content
"""
return {sect: self.__getitem__(sect).to_dict()
for sect in self.sections()}
|
pyscaffold/configupdater | src/configupdater/configupdater.py | ConfigUpdater.validate_format | python | def validate_format(self, **kwargs):
args = dict(
dict_type=self._dict,
allow_no_value=self._allow_no_value,
inline_comment_prefixes=self._inline_comment_prefixes,
strict=self._strict,
empty_lines_in_values=self._empty_lines_in_values
)
args.update(kwargs)
parser = ConfigParser(**args)
updated_cfg = str(self)
parser.read_string(updated_cfg) | Call ConfigParser to validate config
Args:
kwargs: are passed to :class:`configparser.ConfigParser` | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L833-L849 | null | class ConfigUpdater(Container, MutableMapping):
"""Parser for updating configuration files.
ConfigUpdater follows the API of ConfigParser with some differences:
* inline comments are treated as part of a key's value,
* only a single config file can be updated at a time,
* empty lines in values are not valid,
* the original case of sections and keys are kept,
* control over the position of a new section/key.
Following features are **deliberately not** implemented:
* interpolation of values,
* propagation of parameters from the default section,
* conversions of values,
* passing key/value-pairs with ``default`` argument,
* non-strict mode allowing duplicate sections and keys.
"""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
def __init__(self, allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, space_around_delimiters=True):
"""Constructor of ConfigUpdater
Args:
allow_no_value (bool): allow keys without a value, default False
delimiters (tuple): delimiters for key/value pairs, default =, :
comment_prefixes (tuple): prefix of comments, default # and ;
inline_comment_prefixes (tuple): prefix of inline comment,
default None
strict (bool): each section must be unique as well as every key
within a section, default True
space_around_delimiters (bool): add a space before and after the
delimiter, default True
"""
self._filename = None
self._space_around_delimiters = space_around_delimiters
self._dict = _default_dict # no reason to let the user change this
# keeping _sections to keep code aligned with ConfigParser but
# _structure takes the actual role instead. Only use self._structure!
self._sections = self._dict()
self._structure = []
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
# Options from ConfigParser that we need to set constantly
self._empty_lines_in_values = False
super().__init__()
def _get_section_idx(self, name):
idx = [i for i, entry in enumerate(self._structure)
if isinstance(entry, Section) and entry.name == name]
if idx:
return idx[0]
else:
raise ValueError
def read(self, filename, encoding=None):
"""Read and parse a filename.
Args:
filename (str): path to file
encoding (str): encoding of file, default None
"""
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
self._filename = os.path.abspath(filename)
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The ``f`` argument must be iterable, returning one line at a time.
Optional second argument is the ``source`` specifying the name of the
file being read. If not given, it is taken from f.name. If ``f`` has no
``name`` attribute, ``<???>`` is used.
Args:
f: file like object
source (str): reference name for file object, default None
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string.
Args:
string (str): string containing a configuration
source (str): reference name for file object, default '<string>'
"""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def optionxform(self, optionstr):
"""Converts an option key to lower case for unification
Args:
optionstr (str): key name
Returns:
str: unified option name
"""
return optionstr.lower()
def _update_curr_block(self, block_type):
if not isinstance(self.last_item, block_type):
new_block = block_type(container=self)
self._structure.append(new_block)
def _add_comment(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_comment(line)
else:
self._update_curr_block(Comment)
self.last_item.add_line(line)
def _add_section(self, sectname, line):
new_section = Section(sectname, container=self)
new_section.add_line(line)
self._structure.append(new_section)
def _add_option(self, key, vi, value, line):
entry = Option(
key, value,
delimiter=vi,
container=self.last_item,
space_around_delimiters=self._space_around_delimiters,
line=line)
self.last_item.add_option(entry)
def _add_space(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_space(line)
else:
self._update_curr_block(Space)
self.last_item.add_line(line)
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]`), plus key/value options, indicated by
`name` and `value` delimited with a specific substring (`=` or `:` by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#` and `;` by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
Note: This method was borrowed from ConfigParser and we keep this
mess here as close as possible to the original messod (pardon
this german pun) for consistency reasons and later upgrades.
"""
self._structure = []
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
self._add_comment(line) # HOOK
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
self.last_item.last_item.add_line(line) # HOOK
else:
# empty line marks end of value
indent_level = sys.maxsize
if comment_start is None:
self._add_space(line)
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
self.last_item.last_item.add_line(line) # HOOK
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
else:
cursect = self._dict()
self._sections[sectname] = cursect
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
self._add_section(sectname, line) # HOOK
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
self._add_option(optname, vi, optval, line) # HOOK
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def write(self, fp):
"""Write an .ini-format representation of the configuration state.
Args:
fp (file-like object): open file handle
"""
fp.write(str(self))
def update_file(self):
"""Update the read-in configuration file.
"""
if self._filename is None:
raise NoConfigFileReadError()
with open(self._filename, 'w') as fb:
self.write(fb)
def sections_blocks(self):
"""Returns all section blocks
Returns:
list: list of :class:`Section` blocks
"""
return [block for block in self._structure
if isinstance(block, Section)]
def sections(self):
"""Return a list of section names
Returns:
list: list of section names
"""
return [section.name for section in self.sections_blocks()]
def __str__(self):
return ''.join(str(block) for block in self._structure)
def __getitem__(self, key):
for section in self.sections_blocks():
if section.name == key:
return section
else:
raise KeyError(key)
def __setitem__(self, key, value):
if not isinstance(value, Section):
raise ValueError("Value must be of type Section!")
if isinstance(key, str) and key in self:
idx = self._get_section_idx(key)
del self._structure[idx]
self._structure.insert(idx, value)
else:
# name the section by the key
value.name = key
self.add_section(value)
def __delitem__(self, section):
if not self.has_section(section):
raise KeyError(section)
self.remove_section(section)
def __contains__(self, key):
return self.has_section(key)
def __len__(self):
"""Number of all blocks, not just sections"""
return len(self._structure)
def __iter__(self):
"""Iterate over all blocks, not just sections"""
return self._structure.__iter__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._structure == other._structure
else:
return False
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
Args:
section (str or :class:`Section`): name or Section type
"""
if section in self.sections():
raise DuplicateSectionError(section)
if isinstance(section, str):
# create a new section
section = Section(section, container=self)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
self._structure.append(section)
def has_section(self, section):
"""Returns whether the given section exists.
Args:
section (str): name of section
Returns:
bool: wether the section exists
"""
return section in self.sections()
def options(self, section):
"""Returns list of configuration options for the named section.
Args:
section (str): name of section
Returns:
list: list of option names
"""
if not self.has_section(section):
raise NoSectionError(section) from None
return self.__getitem__(section).options()
def get(self, section, option):
"""Gets an option value for a given section.
Args:
section (str): section name
option (str): option name
Returns:
:class:`Option`: Option object holding key/value pair
"""
if not self.has_section(section):
raise NoSectionError(section) from None
section = self.__getitem__(section)
option = self.optionxform(option)
try:
value = section[option]
except KeyError:
raise NoOptionError(option, section)
return value
def items(self, section=_UNSET):
"""Return a list of (name, value) tuples for options or sections.
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_type) for each section.
Args:
section (str): optional section name, default UNSET
Returns:
list: list of :class:`Section` or :class:`Option` objects
"""
if section is _UNSET:
return [(sect.name, sect) for sect in self.sections_blocks()]
section = self.__getitem__(section)
return [(opt.key, opt) for opt in section.option_blocks()]
def has_option(self, section, option):
"""Checks for the existence of a given option in a given section.
Args:
section (str): name of section
option (str): name of option
Returns:
bool: whether the option exists in the given section
"""
if section not in self.sections():
return False
else:
option = self.optionxform(option)
return option in self[section]
def set(self, section, option, value=None):
"""Set an option.
Args:
section (str): section name
option (str): option name
value (str): value, default None
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
if option in section:
section[option].value = value
else:
section[option] = value
return self
def remove_option(self, section, option):
"""Remove an option.
Args:
section (str): section name
option (str): option name
Returns:
bool: whether the option was actually removed
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
existed = option in section.options()
if existed:
del section[option]
return existed
def remove_section(self, name):
"""Remove a file section.
Args:
name: name of the section
Returns:
bool: whether the section was actually removed
"""
existed = self.has_section(name)
if existed:
idx = self._get_section_idx(name)
del self._structure[idx]
return existed
def to_dict(self):
"""Transform to dictionary
Returns:
dict: dictionary with same content
"""
return {sect: self.__getitem__(sect).to_dict()
for sect in self.sections()}
|
pyscaffold/configupdater | src/configupdater/configupdater.py | ConfigUpdater.add_section | python | def add_section(self, section):
if section in self.sections():
raise DuplicateSectionError(section)
if isinstance(section, str):
# create a new section
section = Section(section, container=self)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
self._structure.append(section) | Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
Args:
section (str or :class:`Section`): name or Section type | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L912-L928 | [
"def sections(self):\n \"\"\"Return a list of section names\n\n Returns:\n list: list of section names\n \"\"\"\n return [section.name for section in self.sections_blocks()]\n"
] | class ConfigUpdater(Container, MutableMapping):
"""Parser for updating configuration files.
ConfigUpdater follows the API of ConfigParser with some differences:
* inline comments are treated as part of a key's value,
* only a single config file can be updated at a time,
* empty lines in values are not valid,
* the original case of sections and keys are kept,
* control over the position of a new section/key.
Following features are **deliberately not** implemented:
* interpolation of values,
* propagation of parameters from the default section,
* conversions of values,
* passing key/value-pairs with ``default`` argument,
* non-strict mode allowing duplicate sections and keys.
"""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
def __init__(self, allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, space_around_delimiters=True):
"""Constructor of ConfigUpdater
Args:
allow_no_value (bool): allow keys without a value, default False
delimiters (tuple): delimiters for key/value pairs, default =, :
comment_prefixes (tuple): prefix of comments, default # and ;
inline_comment_prefixes (tuple): prefix of inline comment,
default None
strict (bool): each section must be unique as well as every key
within a section, default True
space_around_delimiters (bool): add a space before and after the
delimiter, default True
"""
self._filename = None
self._space_around_delimiters = space_around_delimiters
self._dict = _default_dict # no reason to let the user change this
# keeping _sections to keep code aligned with ConfigParser but
# _structure takes the actual role instead. Only use self._structure!
self._sections = self._dict()
self._structure = []
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
# Options from ConfigParser that we need to set constantly
self._empty_lines_in_values = False
super().__init__()
def _get_section_idx(self, name):
idx = [i for i, entry in enumerate(self._structure)
if isinstance(entry, Section) and entry.name == name]
if idx:
return idx[0]
else:
raise ValueError
def read(self, filename, encoding=None):
"""Read and parse a filename.
Args:
filename (str): path to file
encoding (str): encoding of file, default None
"""
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
self._filename = os.path.abspath(filename)
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The ``f`` argument must be iterable, returning one line at a time.
Optional second argument is the ``source`` specifying the name of the
file being read. If not given, it is taken from f.name. If ``f`` has no
``name`` attribute, ``<???>`` is used.
Args:
f: file like object
source (str): reference name for file object, default None
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string.
Args:
string (str): string containing a configuration
source (str): reference name for file object, default '<string>'
"""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def optionxform(self, optionstr):
"""Converts an option key to lower case for unification
Args:
optionstr (str): key name
Returns:
str: unified option name
"""
return optionstr.lower()
def _update_curr_block(self, block_type):
if not isinstance(self.last_item, block_type):
new_block = block_type(container=self)
self._structure.append(new_block)
def _add_comment(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_comment(line)
else:
self._update_curr_block(Comment)
self.last_item.add_line(line)
def _add_section(self, sectname, line):
new_section = Section(sectname, container=self)
new_section.add_line(line)
self._structure.append(new_section)
def _add_option(self, key, vi, value, line):
entry = Option(
key, value,
delimiter=vi,
container=self.last_item,
space_around_delimiters=self._space_around_delimiters,
line=line)
self.last_item.add_option(entry)
def _add_space(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_space(line)
else:
self._update_curr_block(Space)
self.last_item.add_line(line)
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]`), plus key/value options, indicated by
`name` and `value` delimited with a specific substring (`=` or `:` by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#` and `;` by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
Note: This method was borrowed from ConfigParser and we keep this
mess here as close as possible to the original messod (pardon
this german pun) for consistency reasons and later upgrades.
"""
self._structure = []
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
self._add_comment(line) # HOOK
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
self.last_item.last_item.add_line(line) # HOOK
else:
# empty line marks end of value
indent_level = sys.maxsize
if comment_start is None:
self._add_space(line)
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
self.last_item.last_item.add_line(line) # HOOK
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
else:
cursect = self._dict()
self._sections[sectname] = cursect
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
self._add_section(sectname, line) # HOOK
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
self._add_option(optname, vi, optval, line) # HOOK
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def write(self, fp):
"""Write an .ini-format representation of the configuration state.
Args:
fp (file-like object): open file handle
"""
fp.write(str(self))
def update_file(self):
"""Update the read-in configuration file.
"""
if self._filename is None:
raise NoConfigFileReadError()
with open(self._filename, 'w') as fb:
self.write(fb)
def validate_format(self, **kwargs):
"""Call ConfigParser to validate config
Args:
kwargs: are passed to :class:`configparser.ConfigParser`
"""
args = dict(
dict_type=self._dict,
allow_no_value=self._allow_no_value,
inline_comment_prefixes=self._inline_comment_prefixes,
strict=self._strict,
empty_lines_in_values=self._empty_lines_in_values
)
args.update(kwargs)
parser = ConfigParser(**args)
updated_cfg = str(self)
parser.read_string(updated_cfg)
def sections_blocks(self):
"""Returns all section blocks
Returns:
list: list of :class:`Section` blocks
"""
return [block for block in self._structure
if isinstance(block, Section)]
def sections(self):
"""Return a list of section names
Returns:
list: list of section names
"""
return [section.name for section in self.sections_blocks()]
def __str__(self):
return ''.join(str(block) for block in self._structure)
def __getitem__(self, key):
for section in self.sections_blocks():
if section.name == key:
return section
else:
raise KeyError(key)
def __setitem__(self, key, value):
if not isinstance(value, Section):
raise ValueError("Value must be of type Section!")
if isinstance(key, str) and key in self:
idx = self._get_section_idx(key)
del self._structure[idx]
self._structure.insert(idx, value)
else:
# name the section by the key
value.name = key
self.add_section(value)
def __delitem__(self, section):
if not self.has_section(section):
raise KeyError(section)
self.remove_section(section)
def __contains__(self, key):
return self.has_section(key)
def __len__(self):
"""Number of all blocks, not just sections"""
return len(self._structure)
def __iter__(self):
"""Iterate over all blocks, not just sections"""
return self._structure.__iter__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._structure == other._structure
else:
return False
def has_section(self, section):
"""Returns whether the given section exists.
Args:
section (str): name of section
Returns:
bool: wether the section exists
"""
return section in self.sections()
def options(self, section):
"""Returns list of configuration options for the named section.
Args:
section (str): name of section
Returns:
list: list of option names
"""
if not self.has_section(section):
raise NoSectionError(section) from None
return self.__getitem__(section).options()
def get(self, section, option):
"""Gets an option value for a given section.
Args:
section (str): section name
option (str): option name
Returns:
:class:`Option`: Option object holding key/value pair
"""
if not self.has_section(section):
raise NoSectionError(section) from None
section = self.__getitem__(section)
option = self.optionxform(option)
try:
value = section[option]
except KeyError:
raise NoOptionError(option, section)
return value
def items(self, section=_UNSET):
"""Return a list of (name, value) tuples for options or sections.
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_type) for each section.
Args:
section (str): optional section name, default UNSET
Returns:
list: list of :class:`Section` or :class:`Option` objects
"""
if section is _UNSET:
return [(sect.name, sect) for sect in self.sections_blocks()]
section = self.__getitem__(section)
return [(opt.key, opt) for opt in section.option_blocks()]
def has_option(self, section, option):
"""Checks for the existence of a given option in a given section.
Args:
section (str): name of section
option (str): name of option
Returns:
bool: whether the option exists in the given section
"""
if section not in self.sections():
return False
else:
option = self.optionxform(option)
return option in self[section]
def set(self, section, option, value=None):
"""Set an option.
Args:
section (str): section name
option (str): option name
value (str): value, default None
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
if option in section:
section[option].value = value
else:
section[option] = value
return self
def remove_option(self, section, option):
"""Remove an option.
Args:
section (str): section name
option (str): option name
Returns:
bool: whether the option was actually removed
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
existed = option in section.options()
if existed:
del section[option]
return existed
def remove_section(self, name):
"""Remove a file section.
Args:
name: name of the section
Returns:
bool: whether the section was actually removed
"""
existed = self.has_section(name)
if existed:
idx = self._get_section_idx(name)
del self._structure[idx]
return existed
def to_dict(self):
"""Transform to dictionary
Returns:
dict: dictionary with same content
"""
return {sect: self.__getitem__(sect).to_dict()
for sect in self.sections()}
|
pyscaffold/configupdater | src/configupdater/configupdater.py | ConfigUpdater.options | python | def options(self, section):
if not self.has_section(section):
raise NoSectionError(section) from None
return self.__getitem__(section).options() | Returns list of configuration options for the named section.
Args:
section (str): name of section
Returns:
list: list of option names | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L941-L952 | [
"def has_section(self, section):\n \"\"\"Returns whether the given section exists.\n\n Args:\n section (str): name of section\n\n Returns:\n bool: wether the section exists\n \"\"\"\n return section in self.sections()\n"
] | class ConfigUpdater(Container, MutableMapping):
"""Parser for updating configuration files.
ConfigUpdater follows the API of ConfigParser with some differences:
* inline comments are treated as part of a key's value,
* only a single config file can be updated at a time,
* empty lines in values are not valid,
* the original case of sections and keys are kept,
* control over the position of a new section/key.
Following features are **deliberately not** implemented:
* interpolation of values,
* propagation of parameters from the default section,
* conversions of values,
* passing key/value-pairs with ``default`` argument,
* non-strict mode allowing duplicate sections and keys.
"""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
def __init__(self, allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, space_around_delimiters=True):
"""Constructor of ConfigUpdater
Args:
allow_no_value (bool): allow keys without a value, default False
delimiters (tuple): delimiters for key/value pairs, default =, :
comment_prefixes (tuple): prefix of comments, default # and ;
inline_comment_prefixes (tuple): prefix of inline comment,
default None
strict (bool): each section must be unique as well as every key
within a section, default True
space_around_delimiters (bool): add a space before and after the
delimiter, default True
"""
self._filename = None
self._space_around_delimiters = space_around_delimiters
self._dict = _default_dict # no reason to let the user change this
# keeping _sections to keep code aligned with ConfigParser but
# _structure takes the actual role instead. Only use self._structure!
self._sections = self._dict()
self._structure = []
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
# Options from ConfigParser that we need to set constantly
self._empty_lines_in_values = False
super().__init__()
def _get_section_idx(self, name):
idx = [i for i, entry in enumerate(self._structure)
if isinstance(entry, Section) and entry.name == name]
if idx:
return idx[0]
else:
raise ValueError
def read(self, filename, encoding=None):
"""Read and parse a filename.
Args:
filename (str): path to file
encoding (str): encoding of file, default None
"""
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
self._filename = os.path.abspath(filename)
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The ``f`` argument must be iterable, returning one line at a time.
Optional second argument is the ``source`` specifying the name of the
file being read. If not given, it is taken from f.name. If ``f`` has no
``name`` attribute, ``<???>`` is used.
Args:
f: file like object
source (str): reference name for file object, default None
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string.
Args:
string (str): string containing a configuration
source (str): reference name for file object, default '<string>'
"""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def optionxform(self, optionstr):
"""Converts an option key to lower case for unification
Args:
optionstr (str): key name
Returns:
str: unified option name
"""
return optionstr.lower()
def _update_curr_block(self, block_type):
if not isinstance(self.last_item, block_type):
new_block = block_type(container=self)
self._structure.append(new_block)
def _add_comment(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_comment(line)
else:
self._update_curr_block(Comment)
self.last_item.add_line(line)
def _add_section(self, sectname, line):
new_section = Section(sectname, container=self)
new_section.add_line(line)
self._structure.append(new_section)
def _add_option(self, key, vi, value, line):
entry = Option(
key, value,
delimiter=vi,
container=self.last_item,
space_around_delimiters=self._space_around_delimiters,
line=line)
self.last_item.add_option(entry)
def _add_space(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_space(line)
else:
self._update_curr_block(Space)
self.last_item.add_line(line)
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]`), plus key/value options, indicated by
`name` and `value` delimited with a specific substring (`=` or `:` by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#` and `;` by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
Note: This method was borrowed from ConfigParser and we keep this
mess here as close as possible to the original messod (pardon
this german pun) for consistency reasons and later upgrades.
"""
self._structure = []
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
self._add_comment(line) # HOOK
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
self.last_item.last_item.add_line(line) # HOOK
else:
# empty line marks end of value
indent_level = sys.maxsize
if comment_start is None:
self._add_space(line)
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
self.last_item.last_item.add_line(line) # HOOK
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
else:
cursect = self._dict()
self._sections[sectname] = cursect
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
self._add_section(sectname, line) # HOOK
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
self._add_option(optname, vi, optval, line) # HOOK
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def write(self, fp):
"""Write an .ini-format representation of the configuration state.
Args:
fp (file-like object): open file handle
"""
fp.write(str(self))
def update_file(self):
"""Update the read-in configuration file.
"""
if self._filename is None:
raise NoConfigFileReadError()
with open(self._filename, 'w') as fb:
self.write(fb)
def validate_format(self, **kwargs):
"""Call ConfigParser to validate config
Args:
kwargs: are passed to :class:`configparser.ConfigParser`
"""
args = dict(
dict_type=self._dict,
allow_no_value=self._allow_no_value,
inline_comment_prefixes=self._inline_comment_prefixes,
strict=self._strict,
empty_lines_in_values=self._empty_lines_in_values
)
args.update(kwargs)
parser = ConfigParser(**args)
updated_cfg = str(self)
parser.read_string(updated_cfg)
def sections_blocks(self):
"""Returns all section blocks
Returns:
list: list of :class:`Section` blocks
"""
return [block for block in self._structure
if isinstance(block, Section)]
def sections(self):
"""Return a list of section names
Returns:
list: list of section names
"""
return [section.name for section in self.sections_blocks()]
def __str__(self):
return ''.join(str(block) for block in self._structure)
def __getitem__(self, key):
for section in self.sections_blocks():
if section.name == key:
return section
else:
raise KeyError(key)
def __setitem__(self, key, value):
if not isinstance(value, Section):
raise ValueError("Value must be of type Section!")
if isinstance(key, str) and key in self:
idx = self._get_section_idx(key)
del self._structure[idx]
self._structure.insert(idx, value)
else:
# name the section by the key
value.name = key
self.add_section(value)
def __delitem__(self, section):
if not self.has_section(section):
raise KeyError(section)
self.remove_section(section)
def __contains__(self, key):
return self.has_section(key)
def __len__(self):
"""Number of all blocks, not just sections"""
return len(self._structure)
def __iter__(self):
"""Iterate over all blocks, not just sections"""
return self._structure.__iter__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._structure == other._structure
else:
return False
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
Args:
section (str or :class:`Section`): name or Section type
"""
if section in self.sections():
raise DuplicateSectionError(section)
if isinstance(section, str):
# create a new section
section = Section(section, container=self)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
self._structure.append(section)
def has_section(self, section):
"""Returns whether the given section exists.
Args:
section (str): name of section
Returns:
bool: wether the section exists
"""
return section in self.sections()
def get(self, section, option):
"""Gets an option value for a given section.
Args:
section (str): section name
option (str): option name
Returns:
:class:`Option`: Option object holding key/value pair
"""
if not self.has_section(section):
raise NoSectionError(section) from None
section = self.__getitem__(section)
option = self.optionxform(option)
try:
value = section[option]
except KeyError:
raise NoOptionError(option, section)
return value
def items(self, section=_UNSET):
"""Return a list of (name, value) tuples for options or sections.
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_type) for each section.
Args:
section (str): optional section name, default UNSET
Returns:
list: list of :class:`Section` or :class:`Option` objects
"""
if section is _UNSET:
return [(sect.name, sect) for sect in self.sections_blocks()]
section = self.__getitem__(section)
return [(opt.key, opt) for opt in section.option_blocks()]
def has_option(self, section, option):
"""Checks for the existence of a given option in a given section.
Args:
section (str): name of section
option (str): name of option
Returns:
bool: whether the option exists in the given section
"""
if section not in self.sections():
return False
else:
option = self.optionxform(option)
return option in self[section]
def set(self, section, option, value=None):
"""Set an option.
Args:
section (str): section name
option (str): option name
value (str): value, default None
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
if option in section:
section[option].value = value
else:
section[option] = value
return self
def remove_option(self, section, option):
"""Remove an option.
Args:
section (str): section name
option (str): option name
Returns:
bool: whether the option was actually removed
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
existed = option in section.options()
if existed:
del section[option]
return existed
def remove_section(self, name):
"""Remove a file section.
Args:
name: name of the section
Returns:
bool: whether the section was actually removed
"""
existed = self.has_section(name)
if existed:
idx = self._get_section_idx(name)
del self._structure[idx]
return existed
def to_dict(self):
"""Transform to dictionary
Returns:
dict: dictionary with same content
"""
return {sect: self.__getitem__(sect).to_dict()
for sect in self.sections()}
|
pyscaffold/configupdater | src/configupdater/configupdater.py | ConfigUpdater.get | python | def get(self, section, option):
if not self.has_section(section):
raise NoSectionError(section) from None
section = self.__getitem__(section)
option = self.optionxform(option)
try:
value = section[option]
except KeyError:
raise NoOptionError(option, section)
return value | Gets an option value for a given section.
Args:
section (str): section name
option (str): option name
Returns:
:class:`Option`: Option object holding key/value pair | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L954-L974 | [
"def has_section(self, section):\n \"\"\"Returns whether the given section exists.\n\n Args:\n section (str): name of section\n\n Returns:\n bool: wether the section exists\n \"\"\"\n return section in self.sections()\n"
] | class ConfigUpdater(Container, MutableMapping):
"""Parser for updating configuration files.
ConfigUpdater follows the API of ConfigParser with some differences:
* inline comments are treated as part of a key's value,
* only a single config file can be updated at a time,
* empty lines in values are not valid,
* the original case of sections and keys are kept,
* control over the position of a new section/key.
Following features are **deliberately not** implemented:
* interpolation of values,
* propagation of parameters from the default section,
* conversions of values,
* passing key/value-pairs with ``default`` argument,
* non-strict mode allowing duplicate sections and keys.
"""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
def __init__(self, allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, space_around_delimiters=True):
"""Constructor of ConfigUpdater
Args:
allow_no_value (bool): allow keys without a value, default False
delimiters (tuple): delimiters for key/value pairs, default =, :
comment_prefixes (tuple): prefix of comments, default # and ;
inline_comment_prefixes (tuple): prefix of inline comment,
default None
strict (bool): each section must be unique as well as every key
within a section, default True
space_around_delimiters (bool): add a space before and after the
delimiter, default True
"""
self._filename = None
self._space_around_delimiters = space_around_delimiters
self._dict = _default_dict # no reason to let the user change this
# keeping _sections to keep code aligned with ConfigParser but
# _structure takes the actual role instead. Only use self._structure!
self._sections = self._dict()
self._structure = []
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
# Options from ConfigParser that we need to set constantly
self._empty_lines_in_values = False
super().__init__()
def _get_section_idx(self, name):
idx = [i for i, entry in enumerate(self._structure)
if isinstance(entry, Section) and entry.name == name]
if idx:
return idx[0]
else:
raise ValueError
def read(self, filename, encoding=None):
"""Read and parse a filename.
Args:
filename (str): path to file
encoding (str): encoding of file, default None
"""
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
self._filename = os.path.abspath(filename)
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The ``f`` argument must be iterable, returning one line at a time.
Optional second argument is the ``source`` specifying the name of the
file being read. If not given, it is taken from f.name. If ``f`` has no
``name`` attribute, ``<???>`` is used.
Args:
f: file like object
source (str): reference name for file object, default None
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string.
Args:
string (str): string containing a configuration
source (str): reference name for file object, default '<string>'
"""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def optionxform(self, optionstr):
"""Converts an option key to lower case for unification
Args:
optionstr (str): key name
Returns:
str: unified option name
"""
return optionstr.lower()
def _update_curr_block(self, block_type):
if not isinstance(self.last_item, block_type):
new_block = block_type(container=self)
self._structure.append(new_block)
def _add_comment(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_comment(line)
else:
self._update_curr_block(Comment)
self.last_item.add_line(line)
def _add_section(self, sectname, line):
new_section = Section(sectname, container=self)
new_section.add_line(line)
self._structure.append(new_section)
def _add_option(self, key, vi, value, line):
entry = Option(
key, value,
delimiter=vi,
container=self.last_item,
space_around_delimiters=self._space_around_delimiters,
line=line)
self.last_item.add_option(entry)
def _add_space(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_space(line)
else:
self._update_curr_block(Space)
self.last_item.add_line(line)
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]`), plus key/value options, indicated by
`name` and `value` delimited with a specific substring (`=` or `:` by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#` and `;` by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
Note: This method was borrowed from ConfigParser and we keep this
mess here as close as possible to the original messod (pardon
this german pun) for consistency reasons and later upgrades.
"""
self._structure = []
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
self._add_comment(line) # HOOK
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
self.last_item.last_item.add_line(line) # HOOK
else:
# empty line marks end of value
indent_level = sys.maxsize
if comment_start is None:
self._add_space(line)
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
self.last_item.last_item.add_line(line) # HOOK
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
else:
cursect = self._dict()
self._sections[sectname] = cursect
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
self._add_section(sectname, line) # HOOK
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
self._add_option(optname, vi, optval, line) # HOOK
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def write(self, fp):
"""Write an .ini-format representation of the configuration state.
Args:
fp (file-like object): open file handle
"""
fp.write(str(self))
def update_file(self):
"""Update the read-in configuration file.
"""
if self._filename is None:
raise NoConfigFileReadError()
with open(self._filename, 'w') as fb:
self.write(fb)
def validate_format(self, **kwargs):
"""Call ConfigParser to validate config
Args:
kwargs: are passed to :class:`configparser.ConfigParser`
"""
args = dict(
dict_type=self._dict,
allow_no_value=self._allow_no_value,
inline_comment_prefixes=self._inline_comment_prefixes,
strict=self._strict,
empty_lines_in_values=self._empty_lines_in_values
)
args.update(kwargs)
parser = ConfigParser(**args)
updated_cfg = str(self)
parser.read_string(updated_cfg)
def sections_blocks(self):
"""Returns all section blocks
Returns:
list: list of :class:`Section` blocks
"""
return [block for block in self._structure
if isinstance(block, Section)]
def sections(self):
"""Return a list of section names
Returns:
list: list of section names
"""
return [section.name for section in self.sections_blocks()]
def __str__(self):
return ''.join(str(block) for block in self._structure)
def __getitem__(self, key):
for section in self.sections_blocks():
if section.name == key:
return section
else:
raise KeyError(key)
def __setitem__(self, key, value):
if not isinstance(value, Section):
raise ValueError("Value must be of type Section!")
if isinstance(key, str) and key in self:
idx = self._get_section_idx(key)
del self._structure[idx]
self._structure.insert(idx, value)
else:
# name the section by the key
value.name = key
self.add_section(value)
def __delitem__(self, section):
if not self.has_section(section):
raise KeyError(section)
self.remove_section(section)
def __contains__(self, key):
return self.has_section(key)
def __len__(self):
"""Number of all blocks, not just sections"""
return len(self._structure)
def __iter__(self):
"""Iterate over all blocks, not just sections"""
return self._structure.__iter__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._structure == other._structure
else:
return False
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
Args:
section (str or :class:`Section`): name or Section type
"""
if section in self.sections():
raise DuplicateSectionError(section)
if isinstance(section, str):
# create a new section
section = Section(section, container=self)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
self._structure.append(section)
def has_section(self, section):
"""Returns whether the given section exists.
Args:
section (str): name of section
Returns:
bool: wether the section exists
"""
return section in self.sections()
def options(self, section):
"""Returns list of configuration options for the named section.
Args:
section (str): name of section
Returns:
list: list of option names
"""
if not self.has_section(section):
raise NoSectionError(section) from None
return self.__getitem__(section).options()
def items(self, section=_UNSET):
"""Return a list of (name, value) tuples for options or sections.
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_type) for each section.
Args:
section (str): optional section name, default UNSET
Returns:
list: list of :class:`Section` or :class:`Option` objects
"""
if section is _UNSET:
return [(sect.name, sect) for sect in self.sections_blocks()]
section = self.__getitem__(section)
return [(opt.key, opt) for opt in section.option_blocks()]
def has_option(self, section, option):
"""Checks for the existence of a given option in a given section.
Args:
section (str): name of section
option (str): name of option
Returns:
bool: whether the option exists in the given section
"""
if section not in self.sections():
return False
else:
option = self.optionxform(option)
return option in self[section]
def set(self, section, option, value=None):
"""Set an option.
Args:
section (str): section name
option (str): option name
value (str): value, default None
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
if option in section:
section[option].value = value
else:
section[option] = value
return self
def remove_option(self, section, option):
"""Remove an option.
Args:
section (str): section name
option (str): option name
Returns:
bool: whether the option was actually removed
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
existed = option in section.options()
if existed:
del section[option]
return existed
def remove_section(self, name):
"""Remove a file section.
Args:
name: name of the section
Returns:
bool: whether the section was actually removed
"""
existed = self.has_section(name)
if existed:
idx = self._get_section_idx(name)
del self._structure[idx]
return existed
def to_dict(self):
"""Transform to dictionary
Returns:
dict: dictionary with same content
"""
return {sect: self.__getitem__(sect).to_dict()
for sect in self.sections()}
|
pyscaffold/configupdater | src/configupdater/configupdater.py | ConfigUpdater.items | python | def items(self, section=_UNSET):
if section is _UNSET:
return [(sect.name, sect) for sect in self.sections_blocks()]
section = self.__getitem__(section)
return [(opt.key, opt) for opt in section.option_blocks()] | Return a list of (name, value) tuples for options or sections.
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_type) for each section.
Args:
section (str): optional section name, default UNSET
Returns:
list: list of :class:`Section` or :class:`Option` objects | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L976-L993 | [
"def sections_blocks(self):\n \"\"\"Returns all section blocks\n\n Returns:\n list: list of :class:`Section` blocks\n \"\"\"\n return [block for block in self._structure\n if isinstance(block, Section)]\n",
"def __getitem__(self, key):\n for section in self.sections_blocks():\n ... | class ConfigUpdater(Container, MutableMapping):
"""Parser for updating configuration files.
ConfigUpdater follows the API of ConfigParser with some differences:
* inline comments are treated as part of a key's value,
* only a single config file can be updated at a time,
* empty lines in values are not valid,
* the original case of sections and keys are kept,
* control over the position of a new section/key.
Following features are **deliberately not** implemented:
* interpolation of values,
* propagation of parameters from the default section,
* conversions of values,
* passing key/value-pairs with ``default`` argument,
* non-strict mode allowing duplicate sections and keys.
"""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
def __init__(self, allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, space_around_delimiters=True):
"""Constructor of ConfigUpdater
Args:
allow_no_value (bool): allow keys without a value, default False
delimiters (tuple): delimiters for key/value pairs, default =, :
comment_prefixes (tuple): prefix of comments, default # and ;
inline_comment_prefixes (tuple): prefix of inline comment,
default None
strict (bool): each section must be unique as well as every key
within a section, default True
space_around_delimiters (bool): add a space before and after the
delimiter, default True
"""
self._filename = None
self._space_around_delimiters = space_around_delimiters
self._dict = _default_dict # no reason to let the user change this
# keeping _sections to keep code aligned with ConfigParser but
# _structure takes the actual role instead. Only use self._structure!
self._sections = self._dict()
self._structure = []
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
# Options from ConfigParser that we need to set constantly
self._empty_lines_in_values = False
super().__init__()
def _get_section_idx(self, name):
idx = [i for i, entry in enumerate(self._structure)
if isinstance(entry, Section) and entry.name == name]
if idx:
return idx[0]
else:
raise ValueError
    def read(self, filename, encoding=None):
        """Read and parse a filename.

        Args:
            filename (str): path to file
            encoding (str): encoding of file, default None
        """
        with open(filename, encoding=encoding) as fp:
            self._read(fp, filename)
        # Remember the absolute path so update_file() can write back later.
        self._filename = os.path.abspath(filename)
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The ``f`` argument must be iterable, returning one line at a time.
Optional second argument is the ``source`` specifying the name of the
file being read. If not given, it is taken from f.name. If ``f`` has no
``name`` attribute, ``<???>`` is used.
Args:
f: file like object
source (str): reference name for file object, default None
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string.
Args:
string (str): string containing a configuration
source (str): reference name for file object, default '<string>'
"""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def optionxform(self, optionstr):
"""Converts an option key to lower case for unification
Args:
optionstr (str): key name
Returns:
str: unified option name
"""
return optionstr.lower()
    def _update_curr_block(self, block_type):
        # Start a new block of ``block_type`` unless the most recent block is
        # already of that type, so consecutive lines of the same kind are
        # merged into one block. NOTE(review): ``self.last_item`` comes from
        # the Container base class (not visible here) — presumably the last
        # element of ``self._structure``; confirm.
        if not isinstance(self.last_item, block_type):
            new_block = block_type(container=self)
            self._structure.append(new_block)
    def _add_comment(self, line):
        # Comments inside a section are attached to that section; otherwise
        # they accumulate in a top-level Comment block.
        if isinstance(self.last_item, Section):
            self.last_item.add_comment(line)
        else:
            self._update_curr_block(Comment)
            self.last_item.add_line(line)
    def _add_section(self, sectname, line):
        # Create a Section block from the raw header line and append it to
        # the parsed structure.
        new_section = Section(sectname, container=self)
        new_section.add_line(line)
        self._structure.append(new_section)
    def _add_option(self, key, vi, value, line):
        # Wrap a parsed key/value pair into an Option block and attach it to
        # the section currently being parsed (the last block).
        entry = Option(
            key, value,
            delimiter=vi,  # the delimiter actually found in the file
            container=self.last_item,
            space_around_delimiters=self._space_around_delimiters,
            line=line)
        self.last_item.add_option(entry)
    def _add_space(self, line):
        # Blank lines inside a section stay with that section; otherwise they
        # accumulate in a top-level Space block.
        if isinstance(self.last_item, Section):
            self.last_item.add_space(line)
        else:
            self._update_curr_block(Space)
            self.last_item.add_line(line)
    def _read(self, fp, fpname):
        """Parse a sectioned configuration file.

        Each section in a configuration file contains a header, indicated by
        a name in square brackets (`[]`), plus key/value options, indicated by
        `name` and `value` delimited with a specific substring (`=` or `:` by
        default).

        Values can span multiple lines, as long as they are indented deeper
        than the first line of the value. Depending on the parser's mode, blank
        lines may be treated as parts of multiline values or ignored.

        Configuration files may include comments, prefixed by specific
        characters (`#` and `;` by default). Comments may appear on their own
        in an otherwise empty line or may be entered in lines holding values or
        section names.

        Args:
            fp: iterable of lines (e.g. an open file object)
            fpname (str): name of the source, used in error messages

        Raises:
            DuplicateSectionError / DuplicateOptionError: in strict mode
            MissingSectionHeaderError: if an option precedes any section
            ParsingError: raised at the end, listing all bogus lines

        Note: This method was borrowed from ConfigParser and we keep this
            mess here as close as possible to the original messod (pardon
            this german pun) for consistency reasons and later upgrades.
            The ``# HOOK`` lines are the additions that record each parsed
            line into ``self._structure``.
        """
        self._structure = []
        elements_added = set()
        cursect = None                        # None, or a dictionary
        sectname = None
        optname = None
        lineno = 0
        indent_level = 0
        e = None                              # None, or an exception
        for lineno, line in enumerate(fp, start=1):
            comment_start = sys.maxsize
            # strip inline comments
            inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
            while comment_start == sys.maxsize and inline_prefixes:
                next_prefixes = {}
                for prefix, index in inline_prefixes.items():
                    index = line.find(prefix, index+1)
                    if index == -1:
                        continue
                    next_prefixes[prefix] = index
                    # only a prefix at start-of-line or after whitespace
                    # actually begins a comment
                    if index == 0 or (index > 0 and line[index-1].isspace()):
                        comment_start = min(comment_start, index)
                inline_prefixes = next_prefixes
            # strip full line comments
            for prefix in self._comment_prefixes:
                if line.strip().startswith(prefix):
                    comment_start = 0
                    self._add_comment(line)  # HOOK
                    break
            if comment_start == sys.maxsize:
                comment_start = None
            value = line[:comment_start].strip()
            if not value:
                if self._empty_lines_in_values:
                    # add empty line to the value, but only if there was no
                    # comment on the line
                    if (comment_start is None and
                            cursect is not None and
                            optname and
                            cursect[optname] is not None):
                        cursect[optname].append('')  # newlines added at join
                        self.last_item.last_item.add_line(line)  # HOOK
                else:
                    # empty line marks end of value
                    indent_level = sys.maxsize
                if comment_start is None:
                    self._add_space(line)
                continue
            # continuation line?
            first_nonspace = self.NONSPACECRE.search(line)
            cur_indent_level = first_nonspace.start() if first_nonspace else 0
            if (cursect is not None and optname and
                    cur_indent_level > indent_level):
                cursect[optname].append(value)
                self.last_item.last_item.add_line(line)  # HOOK
            # a section header or option header?
            else:
                indent_level = cur_indent_level
                # is it a section header?
                mo = self.SECTCRE.match(value)
                if mo:
                    sectname = mo.group('header')
                    if sectname in self._sections:
                        if self._strict and sectname in elements_added:
                            raise DuplicateSectionError(sectname, fpname,
                                                        lineno)
                        cursect = self._sections[sectname]
                        elements_added.add(sectname)
                    else:
                        cursect = self._dict()
                        self._sections[sectname] = cursect
                        elements_added.add(sectname)
                    # So sections can't start with a continuation line
                    optname = None
                    self._add_section(sectname, line)  # HOOK
                # no section header in the file?
                elif cursect is None:
                    raise MissingSectionHeaderError(fpname, lineno, line)
                # an option line?
                else:
                    mo = self._optcre.match(value)
                    if mo:
                        optname, vi, optval = mo.group('option', 'vi', 'value')
                        if not optname:
                            e = self._handle_error(e, fpname, lineno, line)
                        optname = self.optionxform(optname.rstrip())
                        if (self._strict and
                                (sectname, optname) in elements_added):
                            raise DuplicateOptionError(sectname, optname,
                                                       fpname, lineno)
                        elements_added.add((sectname, optname))
                        # This check is fine because the OPTCRE cannot
                        # match if it would set optval to None
                        if optval is not None:
                            optval = optval.strip()
                            cursect[optname] = [optval]
                        else:
                            # valueless option handling
                            cursect[optname] = None
                        self._add_option(optname, vi, optval, line)  # HOOK
                    else:
                        # a non-fatal parsing error occurred. set up the
                        # exception but keep going. the exception will be
                        # raised at the end of the file and will contain a
                        # list of all bogus lines
                        e = self._handle_error(e, fpname, lineno, line)
        # if any parsing errors occurred, raise an exception
        if e:
            raise e
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
    def write(self, fp):
        """Write an .ini-format representation of the configuration state.

        Args:
            fp (file-like object): open file handle
        """
        # str(self) serializes all parsed blocks back to their text form.
        fp.write(str(self))
def update_file(self):
"""Update the read-in configuration file.
"""
if self._filename is None:
raise NoConfigFileReadError()
with open(self._filename, 'w') as fb:
self.write(fb)
def validate_format(self, **kwargs):
"""Call ConfigParser to validate config
Args:
kwargs: are passed to :class:`configparser.ConfigParser`
"""
args = dict(
dict_type=self._dict,
allow_no_value=self._allow_no_value,
inline_comment_prefixes=self._inline_comment_prefixes,
strict=self._strict,
empty_lines_in_values=self._empty_lines_in_values
)
args.update(kwargs)
parser = ConfigParser(**args)
updated_cfg = str(self)
parser.read_string(updated_cfg)
def sections_blocks(self):
"""Returns all section blocks
Returns:
list: list of :class:`Section` blocks
"""
return [block for block in self._structure
if isinstance(block, Section)]
def sections(self):
"""Return a list of section names
Returns:
list: list of section names
"""
return [section.name for section in self.sections_blocks()]
def __str__(self):
return ''.join(str(block) for block in self._structure)
def __getitem__(self, key):
for section in self.sections_blocks():
if section.name == key:
return section
else:
raise KeyError(key)
    def __setitem__(self, key, value):
        # Only whole Section objects may be assigned.
        if not isinstance(value, Section):
            raise ValueError("Value must be of type Section!")
        if isinstance(key, str) and key in self:
            # Replace an existing section in place, keeping its position.
            idx = self._get_section_idx(key)
            del self._structure[idx]
            self._structure.insert(idx, value)
        else:
            # name the section by the key
            value.name = key
            self.add_section(value)
def __delitem__(self, section):
if not self.has_section(section):
raise KeyError(section)
self.remove_section(section)
    def __contains__(self, key):
        # ``key in updater`` checks for a section with that name.
        return self.has_section(key)
    def __len__(self):
        """Number of all blocks (sections, comments, spaces),
        not just sections.
        """
        return len(self._structure)
def __iter__(self):
"""Iterate over all blocks, not just sections"""
return self._structure.__iter__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._structure == other._structure
else:
return False
    def add_section(self, section):
        """Create a new section in the configuration.

        Raise DuplicateSectionError if a section by the specified name
        already exists. Raise ValueError if name is DEFAULT.

        Args:
            section (str or :class:`Section`): name or Section type
        """
        # NOTE(review): when a Section object is passed, this duplicate
        # check compares it against name strings — relies on Section's
        # equality semantics (defined elsewhere); confirm.
        if section in self.sections():
            raise DuplicateSectionError(section)
        if isinstance(section, str):
            # create a new section
            section = Section(section, container=self)
        elif not isinstance(section, Section):
            raise ValueError("Parameter must be a string or Section type!")
        self._structure.append(section)
def has_section(self, section):
"""Returns whether the given section exists.
Args:
section (str): name of section
Returns:
bool: wether the section exists
"""
return section in self.sections()
def options(self, section):
"""Returns list of configuration options for the named section.
Args:
section (str): name of section
Returns:
list: list of option names
"""
if not self.has_section(section):
raise NoSectionError(section) from None
return self.__getitem__(section).options()
def get(self, section, option):
"""Gets an option value for a given section.
Args:
section (str): section name
option (str): option name
Returns:
:class:`Option`: Option object holding key/value pair
"""
if not self.has_section(section):
raise NoSectionError(section) from None
section = self.__getitem__(section)
option = self.optionxform(option)
try:
value = section[option]
except KeyError:
raise NoOptionError(option, section)
return value
def has_option(self, section, option):
"""Checks for the existence of a given option in a given section.
Args:
section (str): name of section
option (str): name of option
Returns:
bool: whether the option exists in the given section
"""
if section not in self.sections():
return False
else:
option = self.optionxform(option)
return option in self[section]
def set(self, section, option, value=None):
"""Set an option.
Args:
section (str): section name
option (str): option name
value (str): value, default None
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
if option in section:
section[option].value = value
else:
section[option] = value
return self
def remove_option(self, section, option):
"""Remove an option.
Args:
section (str): section name
option (str): option name
Returns:
bool: whether the option was actually removed
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
existed = option in section.options()
if existed:
del section[option]
return existed
def remove_section(self, name):
"""Remove a file section.
Args:
name: name of the section
Returns:
bool: whether the section was actually removed
"""
existed = self.has_section(name)
if existed:
idx = self._get_section_idx(name)
del self._structure[idx]
return existed
def to_dict(self):
"""Transform to dictionary
Returns:
dict: dictionary with same content
"""
return {sect: self.__getitem__(sect).to_dict()
for sect in self.sections()}
|
pyscaffold/configupdater | src/configupdater/configupdater.py | ConfigUpdater.has_option | python | def has_option(self, section, option):
if section not in self.sections():
return False
else:
option = self.optionxform(option)
return option in self[section] | Checks for the existence of a given option in a given section.
Args:
section (str): name of section
option (str): name of option
Returns:
bool: whether the option exists in the given section | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L995-L1009 | [
"def sections(self):\n \"\"\"Return a list of section names\n\n Returns:\n list: list of section names\n \"\"\"\n return [section.name for section in self.sections_blocks()]\n"
] | class ConfigUpdater(Container, MutableMapping):
"""Parser for updating configuration files.
ConfigUpdater follows the API of ConfigParser with some differences:
* inline comments are treated as part of a key's value,
* only a single config file can be updated at a time,
* empty lines in values are not valid,
* the original case of sections and keys are kept,
* control over the position of a new section/key.
Following features are **deliberately not** implemented:
* interpolation of values,
* propagation of parameters from the default section,
* conversions of values,
* passing key/value-pairs with ``default`` argument,
* non-strict mode allowing duplicate sections and keys.
"""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
def __init__(self, allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, space_around_delimiters=True):
"""Constructor of ConfigUpdater
Args:
allow_no_value (bool): allow keys without a value, default False
delimiters (tuple): delimiters for key/value pairs, default =, :
comment_prefixes (tuple): prefix of comments, default # and ;
inline_comment_prefixes (tuple): prefix of inline comment,
default None
strict (bool): each section must be unique as well as every key
within a section, default True
space_around_delimiters (bool): add a space before and after the
delimiter, default True
"""
self._filename = None
self._space_around_delimiters = space_around_delimiters
self._dict = _default_dict # no reason to let the user change this
# keeping _sections to keep code aligned with ConfigParser but
# _structure takes the actual role instead. Only use self._structure!
self._sections = self._dict()
self._structure = []
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
# Options from ConfigParser that we need to set constantly
self._empty_lines_in_values = False
super().__init__()
def _get_section_idx(self, name):
idx = [i for i, entry in enumerate(self._structure)
if isinstance(entry, Section) and entry.name == name]
if idx:
return idx[0]
else:
raise ValueError
def read(self, filename, encoding=None):
"""Read and parse a filename.
Args:
filename (str): path to file
encoding (str): encoding of file, default None
"""
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
self._filename = os.path.abspath(filename)
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The ``f`` argument must be iterable, returning one line at a time.
Optional second argument is the ``source`` specifying the name of the
file being read. If not given, it is taken from f.name. If ``f`` has no
``name`` attribute, ``<???>`` is used.
Args:
f: file like object
source (str): reference name for file object, default None
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string.
Args:
string (str): string containing a configuration
source (str): reference name for file object, default '<string>'
"""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def optionxform(self, optionstr):
"""Converts an option key to lower case for unification
Args:
optionstr (str): key name
Returns:
str: unified option name
"""
return optionstr.lower()
def _update_curr_block(self, block_type):
if not isinstance(self.last_item, block_type):
new_block = block_type(container=self)
self._structure.append(new_block)
def _add_comment(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_comment(line)
else:
self._update_curr_block(Comment)
self.last_item.add_line(line)
def _add_section(self, sectname, line):
new_section = Section(sectname, container=self)
new_section.add_line(line)
self._structure.append(new_section)
def _add_option(self, key, vi, value, line):
entry = Option(
key, value,
delimiter=vi,
container=self.last_item,
space_around_delimiters=self._space_around_delimiters,
line=line)
self.last_item.add_option(entry)
def _add_space(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_space(line)
else:
self._update_curr_block(Space)
self.last_item.add_line(line)
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]`), plus key/value options, indicated by
`name` and `value` delimited with a specific substring (`=` or `:` by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#` and `;` by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
Note: This method was borrowed from ConfigParser and we keep this
mess here as close as possible to the original messod (pardon
this german pun) for consistency reasons and later upgrades.
"""
self._structure = []
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
self._add_comment(line) # HOOK
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
self.last_item.last_item.add_line(line) # HOOK
else:
# empty line marks end of value
indent_level = sys.maxsize
if comment_start is None:
self._add_space(line)
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
self.last_item.last_item.add_line(line) # HOOK
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
else:
cursect = self._dict()
self._sections[sectname] = cursect
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
self._add_section(sectname, line) # HOOK
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
self._add_option(optname, vi, optval, line) # HOOK
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def write(self, fp):
"""Write an .ini-format representation of the configuration state.
Args:
fp (file-like object): open file handle
"""
fp.write(str(self))
def update_file(self):
"""Update the read-in configuration file.
"""
if self._filename is None:
raise NoConfigFileReadError()
with open(self._filename, 'w') as fb:
self.write(fb)
def validate_format(self, **kwargs):
"""Call ConfigParser to validate config
Args:
kwargs: are passed to :class:`configparser.ConfigParser`
"""
args = dict(
dict_type=self._dict,
allow_no_value=self._allow_no_value,
inline_comment_prefixes=self._inline_comment_prefixes,
strict=self._strict,
empty_lines_in_values=self._empty_lines_in_values
)
args.update(kwargs)
parser = ConfigParser(**args)
updated_cfg = str(self)
parser.read_string(updated_cfg)
def sections_blocks(self):
"""Returns all section blocks
Returns:
list: list of :class:`Section` blocks
"""
return [block for block in self._structure
if isinstance(block, Section)]
def sections(self):
"""Return a list of section names
Returns:
list: list of section names
"""
return [section.name for section in self.sections_blocks()]
def __str__(self):
return ''.join(str(block) for block in self._structure)
def __getitem__(self, key):
for section in self.sections_blocks():
if section.name == key:
return section
else:
raise KeyError(key)
def __setitem__(self, key, value):
if not isinstance(value, Section):
raise ValueError("Value must be of type Section!")
if isinstance(key, str) and key in self:
idx = self._get_section_idx(key)
del self._structure[idx]
self._structure.insert(idx, value)
else:
# name the section by the key
value.name = key
self.add_section(value)
def __delitem__(self, section):
if not self.has_section(section):
raise KeyError(section)
self.remove_section(section)
def __contains__(self, key):
return self.has_section(key)
def __len__(self):
"""Number of all blocks, not just sections"""
return len(self._structure)
def __iter__(self):
"""Iterate over all blocks, not just sections"""
return self._structure.__iter__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._structure == other._structure
else:
return False
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
Args:
section (str or :class:`Section`): name or Section type
"""
if section in self.sections():
raise DuplicateSectionError(section)
if isinstance(section, str):
# create a new section
section = Section(section, container=self)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
self._structure.append(section)
def has_section(self, section):
"""Returns whether the given section exists.
Args:
section (str): name of section
Returns:
bool: wether the section exists
"""
return section in self.sections()
def options(self, section):
"""Returns list of configuration options for the named section.
Args:
section (str): name of section
Returns:
list: list of option names
"""
if not self.has_section(section):
raise NoSectionError(section) from None
return self.__getitem__(section).options()
def get(self, section, option):
"""Gets an option value for a given section.
Args:
section (str): section name
option (str): option name
Returns:
:class:`Option`: Option object holding key/value pair
"""
if not self.has_section(section):
raise NoSectionError(section) from None
section = self.__getitem__(section)
option = self.optionxform(option)
try:
value = section[option]
except KeyError:
raise NoOptionError(option, section)
return value
def items(self, section=_UNSET):
"""Return a list of (name, value) tuples for options or sections.
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_type) for each section.
Args:
section (str): optional section name, default UNSET
Returns:
list: list of :class:`Section` or :class:`Option` objects
"""
if section is _UNSET:
return [(sect.name, sect) for sect in self.sections_blocks()]
section = self.__getitem__(section)
return [(opt.key, opt) for opt in section.option_blocks()]
def set(self, section, option, value=None):
"""Set an option.
Args:
section (str): section name
option (str): option name
value (str): value, default None
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
if option in section:
section[option].value = value
else:
section[option] = value
return self
def remove_option(self, section, option):
"""Remove an option.
Args:
section (str): section name
option (str): option name
Returns:
bool: whether the option was actually removed
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
existed = option in section.options()
if existed:
del section[option]
return existed
def remove_section(self, name):
"""Remove a file section.
Args:
name: name of the section
Returns:
bool: whether the section was actually removed
"""
existed = self.has_section(name)
if existed:
idx = self._get_section_idx(name)
del self._structure[idx]
return existed
def to_dict(self):
"""Transform to dictionary
Returns:
dict: dictionary with same content
"""
return {sect: self.__getitem__(sect).to_dict()
for sect in self.sections()}
|
pyscaffold/configupdater | src/configupdater/configupdater.py | ConfigUpdater.set | python | def set(self, section, option, value=None):
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
if option in section:
section[option].value = value
else:
section[option] = value
return self | Set an option.
Args:
section (str): section name
option (str): option name
value (str): value, default None | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L1011-L1028 | [
"def optionxform(self, optionstr):\n \"\"\"Converts an option key to lower case for unification\n\n Args:\n optionstr (str): key name\n\n Returns:\n str: unified option name\n \"\"\"\n return optionstr.lower()\n",
"def __getitem__(self, key):\n for section in self.sections_blocks(... | class ConfigUpdater(Container, MutableMapping):
"""Parser for updating configuration files.
ConfigUpdater follows the API of ConfigParser with some differences:
* inline comments are treated as part of a key's value,
* only a single config file can be updated at a time,
* empty lines in values are not valid,
* the original case of sections and keys are kept,
* control over the position of a new section/key.
Following features are **deliberately not** implemented:
* interpolation of values,
* propagation of parameters from the default section,
* conversions of values,
* passing key/value-pairs with ``default`` argument,
* non-strict mode allowing duplicate sections and keys.
"""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
def __init__(self, allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, space_around_delimiters=True):
"""Constructor of ConfigUpdater
Args:
allow_no_value (bool): allow keys without a value, default False
delimiters (tuple): delimiters for key/value pairs, default =, :
comment_prefixes (tuple): prefix of comments, default # and ;
inline_comment_prefixes (tuple): prefix of inline comment,
default None
strict (bool): each section must be unique as well as every key
within a section, default True
space_around_delimiters (bool): add a space before and after the
delimiter, default True
"""
self._filename = None
self._space_around_delimiters = space_around_delimiters
self._dict = _default_dict # no reason to let the user change this
# keeping _sections to keep code aligned with ConfigParser but
# _structure takes the actual role instead. Only use self._structure!
self._sections = self._dict()
self._structure = []
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
# Options from ConfigParser that we need to set constantly
self._empty_lines_in_values = False
super().__init__()
def _get_section_idx(self, name):
    """Return the index of the named section inside ``self._structure``.

    Args:
        name (str): name of the section to locate

    Returns:
        int: index of the first matching :class:`Section` block

    Raises:
        ValueError: if no section with the given name exists
    """
    # Stop at the first match instead of materializing a list of every
    # matching index only to use the first one.
    for i, entry in enumerate(self._structure):
        if isinstance(entry, Section) and entry.name == name:
            return i
    raise ValueError("no section named {!r}".format(name))
def read(self, filename, encoding=None):
    """Read and parse a filename.

    The encoding is remembered on the instance so a later write-back
    (``update_file``) can reuse it instead of the locale default.

    Args:
        filename (str): path to file
        encoding (str): encoding of file, default None
    """
    with open(filename, encoding=encoding) as fp:
        self._read(fp, filename)
    self._filename = os.path.abspath(filename)
    # Remember the encoding so the file round-trips unchanged.
    self._encoding = encoding
def read_file(self, f, source=None):
    """Like read() but the argument must be a file-like object.

    The ``f`` argument must be iterable, returning one line at a time.
    Optional second argument is the ``source`` specifying the name of
    the file being read. If not given, it is taken from f.name. If ``f``
    has no ``name`` attribute, ``<???>`` is used.

    Args:
        f: file like object
        source (str): reference name for file object, default None
    """
    if source is None:
        # Fall back to a placeholder when the object has no name.
        source = getattr(f, 'name', '<???>')
    self._read(f, source)
def read_string(self, string, source='<string>'):
    """Read configuration from a given string.

    Args:
        string (str): string containing a configuration
        source (str): reference name for file object, default '<string>'
    """
    # Wrap the string in an in-memory file and reuse read_file().
    self.read_file(io.StringIO(string), source)
def optionxform(self, optionstr):
    """Normalize an option key for unified, case-insensitive lookup.

    Mirrors ConfigParser's default transform (lower-casing); override
    in a subclass for a different policy.

    Args:
        optionstr (str): key name

    Returns:
        str: unified option name
    """
    return optionstr.lower()
def _update_curr_block(self, block_type):
    # Open a fresh block of ``block_type`` unless the most recent block
    # already is one — consecutive lines of the same kind (e.g. several
    # comment lines) are collected into a single block.
    if not isinstance(self.last_item, block_type):
        new_block = block_type(container=self)
        self._structure.append(new_block)
def _add_comment(self, line):
    # Parser hook: a comment inside a section is stored on that section;
    # a top-level comment goes into a (possibly new) Comment block.
    if isinstance(self.last_item, Section):
        self.last_item.add_comment(line)
    else:
        self._update_curr_block(Comment)
        self.last_item.add_line(line)
def _add_section(self, sectname, line):
    # Parser hook: append a new Section block that keeps the raw header
    # line, so the original formatting survives a write-back.
    new_section = Section(sectname, container=self)
    new_section.add_line(line)
    self._structure.append(new_section)
def _add_option(self, key, vi, value, line):
    # Parser hook: record an option inside the section parsed last.
    # ``vi`` is the delimiter that separated key from value; the raw
    # ``line`` is kept so the file round-trips byte-for-byte.
    entry = Option(
        key, value,
        delimiter=vi,
        container=self.last_item,
        space_around_delimiters=self._space_around_delimiters,
        line=line)
    self.last_item.add_option(entry)
def _add_space(self, line):
    # Parser hook: a blank line inside a section stays with the section;
    # top-level blank lines are grouped into a Space block.
    if isinstance(self.last_item, Section):
        self.last_item.add_space(line)
    else:
        self._update_curr_block(Space)
        self.last_item.add_line(line)
def _read(self, fp, fpname):
    """Parse a sectioned configuration file.

    Each section in a configuration file contains a header, indicated by
    a name in square brackets (`[]`), plus key/value options, indicated by
    `name` and `value` delimited with a specific substring (`=` or `:` by
    default).

    Values can span multiple lines, as long as they are indented deeper
    than the first line of the value. Depending on the parser's mode, blank
    lines may be treated as parts of multiline values or ignored.

    Configuration files may include comments, prefixed by specific
    characters (`#` and `;` by default). Comments may appear on their own
    in an otherwise empty line or may be entered in lines holding values or
    section names.

    Note: This method was borrowed from ConfigParser and we keep this
          mess here as close as possible to the original method (pardon
          this german pun) for consistency reasons and later upgrades.
          Lines marked ``# HOOK`` are the additions that build the
          block structure on top of ConfigParser's dict-based parse.
    """
    self._structure = []
    elements_added = set()
    cursect = None                        # None, or a dictionary
    sectname = None
    optname = None
    lineno = 0
    indent_level = 0
    e = None                              # None, or an exception
    for lineno, line in enumerate(fp, start=1):
        comment_start = sys.maxsize
        # strip inline comments
        inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
        while comment_start == sys.maxsize and inline_prefixes:
            next_prefixes = {}
            for prefix, index in inline_prefixes.items():
                index = line.find(prefix, index+1)
                if index == -1:
                    continue
                next_prefixes[prefix] = index
                # only a prefix at line start or after whitespace counts
                if index == 0 or (index > 0 and line[index-1].isspace()):
                    comment_start = min(comment_start, index)
            inline_prefixes = next_prefixes
        # strip full line comments
        for prefix in self._comment_prefixes:
            if line.strip().startswith(prefix):
                comment_start = 0
                self._add_comment(line)  # HOOK
                break
        if comment_start == sys.maxsize:
            comment_start = None
        value = line[:comment_start].strip()
        if not value:
            if self._empty_lines_in_values:
                # add empty line to the value, but only if there was no
                # comment on the line
                if (comment_start is None and
                        cursect is not None and
                        optname and
                        cursect[optname] is not None):
                    cursect[optname].append('')  # newlines added at join
                    self.last_item.last_item.add_line(line)  # HOOK
            else:
                # empty line marks end of value
                indent_level = sys.maxsize
            if comment_start is None:
                self._add_space(line)
            continue
        # continuation line?
        first_nonspace = self.NONSPACECRE.search(line)
        cur_indent_level = first_nonspace.start() if first_nonspace else 0
        if (cursect is not None and optname and
                cur_indent_level > indent_level):
            # deeper indent than the option's first line -> continuation
            cursect[optname].append(value)
            self.last_item.last_item.add_line(line)  # HOOK
        # a section header or option header?
        else:
            indent_level = cur_indent_level
            # is it a section header?
            mo = self.SECTCRE.match(value)
            if mo:
                sectname = mo.group('header')
                if sectname in self._sections:
                    if self._strict and sectname in elements_added:
                        raise DuplicateSectionError(sectname, fpname,
                                                    lineno)
                    cursect = self._sections[sectname]
                    elements_added.add(sectname)
                else:
                    cursect = self._dict()
                    self._sections[sectname] = cursect
                    elements_added.add(sectname)
                # So sections can't start with a continuation line
                optname = None
                self._add_section(sectname, line)  # HOOK
            # no section header in the file?
            elif cursect is None:
                raise MissingSectionHeaderError(fpname, lineno, line)
            # an option line?
            else:
                mo = self._optcre.match(value)
                if mo:
                    optname, vi, optval = mo.group('option', 'vi', 'value')
                    if not optname:
                        e = self._handle_error(e, fpname, lineno, line)
                    optname = self.optionxform(optname.rstrip())
                    if (self._strict and
                            (sectname, optname) in elements_added):
                        raise DuplicateOptionError(sectname, optname,
                                                   fpname, lineno)
                    elements_added.add((sectname, optname))
                    # This check is fine because the OPTCRE cannot
                    # match if it would set optval to None
                    if optval is not None:
                        optval = optval.strip()
                        cursect[optname] = [optval]
                    else:
                        # valueless option handling
                        cursect[optname] = None
                    self._add_option(optname, vi, optval, line)  # HOOK
                else:
                    # a non-fatal parsing error occurred. set up the
                    # exception but keep going. the exception will be
                    # raised at the end of the file and will contain a
                    # list of all bogus lines
                    e = self._handle_error(e, fpname, lineno, line)
    # if any parsing errors occurred, raise an exception
    if e:
        raise e
def _handle_error(self, exc, fpname, lineno, line):
    """Accumulate one bogus line, creating the ParsingError lazily."""
    exc = exc or ParsingError(fpname)
    exc.append(lineno, repr(line))
    return exc
def write(self, fp):
    """Write an .ini-format representation of the configuration state.

    Args:
        fp (file-like object): open file handle
    """
    serialized = str(self)
    fp.write(serialized)
def update_file(self):
    """Update the read-in configuration file.

    Raises:
        NoConfigFileReadError: if no file has been read in before
    """
    if self._filename is None:
        raise NoConfigFileReadError()
    # Write back with the encoding the file was read with, if read()
    # recorded one; getattr keeps this working when it did not, in
    # which case the locale default applies as before.
    with open(self._filename, 'w',
              encoding=getattr(self, '_encoding', None)) as fb:
        self.write(fb)
def validate_format(self, **kwargs):
    """Call ConfigParser to validate config.

    Args:
        kwargs: are passed to :class:`configparser.ConfigParser`
    """
    opts = dict(
        dict_type=self._dict,
        allow_no_value=self._allow_no_value,
        inline_comment_prefixes=self._inline_comment_prefixes,
        strict=self._strict,
        empty_lines_in_values=self._empty_lines_in_values,
    )
    opts.update(kwargs)
    # Round-trip the serialized state through ConfigParser, which
    # raises on any formatting problem.
    ConfigParser(**opts).read_string(str(self))
def sections_blocks(self):
    """Return every :class:`Section` block in document order.

    Returns:
        list: list of :class:`Section` blocks
    """
    return [blk for blk in self._structure if isinstance(blk, Section)]
def sections(self):
    """Return the names of all sections, in document order.

    Returns:
        list: list of section names
    """
    return [sect.name for sect in self.sections_blocks()]
def __str__(self):
    # Serialize by concatenating every block's text representation.
    return ''.join(map(str, self._structure))
def __getitem__(self, key):
    """Return the :class:`Section` named ``key``; raise KeyError otherwise."""
    for candidate in self.sections_blocks():
        if candidate.name == key:
            return candidate
    raise KeyError(key)
def __setitem__(self, key, value):
    """Replace the section at ``key`` in place, or append it as new."""
    if not isinstance(value, Section):
        raise ValueError("Value must be of type Section!")
    if isinstance(key, str) and key in self:
        # Swap the existing section, keeping its position in the file.
        idx = self._get_section_idx(key)
        self._structure[idx] = value
    else:
        # name the section by the key
        value.name = key
        self.add_section(value)
def __delitem__(self, section):
    """Delete the named section; raise KeyError if it does not exist."""
    if not self.remove_section(section):
        raise KeyError(section)
def __contains__(self, key):
    """``key in updater`` tests for a section with that name."""
    return key in self.sections()
def __len__(self):
    """Number of all blocks, not just sections.

    Note: Comment and Space blocks count too, so this differs from
    ``len(self.sections())``.
    """
    return len(self._structure)
def __iter__(self):
    """Iterate over all blocks, not just sections."""
    # Delegate directly to the underlying block list.
    return self._structure.__iter__()
def __eq__(self, other):
    """Two updaters are equal iff their parsed block structures are equal."""
    if not isinstance(other, self.__class__):
        return False
    return self._structure == other._structure
def add_section(self, section):
    """Create a new section in the configuration.

    Raise DuplicateSectionError if a section by the specified name
    already exists. Raise ValueError if name is DEFAULT.

    Args:
        section (str or :class:`Section`): name or Section type

    Raises:
        DuplicateSectionError: if a section by that name already exists
        ValueError: if ``section`` is neither str nor Section
    """
    if isinstance(section, str):
        name = section
        # create a new section
        section = Section(section, container=self)
    elif isinstance(section, Section):
        # Compare by *name*: a Section instance never equals a plain
        # string, so the original ``section in self.sections()`` test
        # missed duplicates whenever a Section object was passed.
        name = section.name
    else:
        raise ValueError("Parameter must be a string or Section type!")
    if name in self.sections():
        raise DuplicateSectionError(name)
    self._structure.append(section)
def has_section(self, section):
    """Returns whether the given section exists.

    Args:
        section (str): name of section

    Returns:
        bool: whether the section exists
    """
    names = self.sections()
    return section in names
def options(self, section):
    """Returns list of configuration options for the named section.

    Args:
        section (str): name of section

    Returns:
        list: list of option names

    Raises:
        NoSectionError: if the section does not exist
    """
    if not self.has_section(section):
        raise NoSectionError(section) from None
    return self[section].options()
def get(self, section, option):
    """Gets an option value for a given section.

    Args:
        section (str): section name
        option (str): option name

    Returns:
        :class:`Option`: Option object holding key/value pair

    Raises:
        NoSectionError: if the section does not exist
        NoOptionError: if the option does not exist in the section
    """
    if not self.has_section(section):
        raise NoSectionError(section) from None
    # Do not rebind ``section``: NoOptionError below expects the
    # section *name*, and the original shadowing put the Section
    # block's repr into the error message instead.
    section_block = self.__getitem__(section)
    option = self.optionxform(option)
    try:
        value = section_block[option]
    except KeyError:
        raise NoOptionError(option, section)
    return value
def items(self, section=_UNSET):
    """Return a list of (name, value) tuples for options or sections.

    If section is given, return a list of tuples with (name, value) for
    each option in the section. Otherwise, return a list of tuples with
    (section_name, section_type) for each section.

    Args:
        section (str): optional section name, default UNSET

    Returns:
        list: list of :class:`Section` or :class:`Option` objects
    """
    if section is _UNSET:
        return [(blk.name, blk) for blk in self.sections_blocks()]
    sect = self.__getitem__(section)
    return [(opt.key, opt) for opt in sect.option_blocks()]
def has_option(self, section, option):
    """Checks for the existence of a given option in a given section.

    Args:
        section (str): name of section
        option (str): name of option

    Returns:
        bool: whether the option exists in the given section
    """
    if section not in self.sections():
        return False
    return self.optionxform(option) in self[section]
def remove_option(self, section, option):
    """Remove an option.

    Args:
        section (str): section name
        option (str): option name

    Returns:
        bool: whether the option was actually removed

    Raises:
        NoSectionError: if the section does not exist
    """
    try:
        sect = self.__getitem__(section)
    except KeyError:
        raise NoSectionError(section) from None
    key = self.optionxform(option)
    existed = key in sect.options()
    if existed:
        del sect[key]
    return existed
def remove_section(self, name):
    """Remove a file section.

    Args:
        name: name of the section

    Returns:
        bool: whether the section was actually removed
    """
    if not self.has_section(name):
        return False
    del self._structure[self._get_section_idx(name)]
    return True
def to_dict(self):
    """Transform to dictionary.

    Returns:
        dict: dictionary with same content
    """
    return {name: self[name].to_dict() for name in self.sections()}
|
pyscaffold/configupdater | src/configupdater/configupdater.py | ConfigUpdater.remove_option | python | def remove_option(self, section, option):
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
existed = option in section.options()
if existed:
del section[option]
return existed | Remove an option.
Args:
section (str): section name
option (str): option name
Returns:
bool: whether the option was actually removed | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L1030-L1048 | [
"def optionxform(self, optionstr):\n \"\"\"Converts an option key to lower case for unification\n\n Args:\n optionstr (str): key name\n\n Returns:\n str: unified option name\n \"\"\"\n return optionstr.lower()\n",
"def __getitem__(self, key):\n for section in self.sections_blocks(... | class ConfigUpdater(Container, MutableMapping):
"""Parser for updating configuration files.
ConfigUpdater follows the API of ConfigParser with some differences:
* inline comments are treated as part of a key's value,
* only a single config file can be updated at a time,
* empty lines in values are not valid,
* the original case of sections and keys are kept,
* control over the position of a new section/key.
Following features are **deliberately not** implemented:
* interpolation of values,
* propagation of parameters from the default section,
* conversions of values,
* passing key/value-pairs with ``default`` argument,
* non-strict mode allowing duplicate sections and keys.
"""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
def __init__(self, allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, space_around_delimiters=True):
"""Constructor of ConfigUpdater
Args:
allow_no_value (bool): allow keys without a value, default False
delimiters (tuple): delimiters for key/value pairs, default =, :
comment_prefixes (tuple): prefix of comments, default # and ;
inline_comment_prefixes (tuple): prefix of inline comment,
default None
strict (bool): each section must be unique as well as every key
within a section, default True
space_around_delimiters (bool): add a space before and after the
delimiter, default True
"""
self._filename = None
self._space_around_delimiters = space_around_delimiters
self._dict = _default_dict # no reason to let the user change this
# keeping _sections to keep code aligned with ConfigParser but
# _structure takes the actual role instead. Only use self._structure!
self._sections = self._dict()
self._structure = []
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
# Options from ConfigParser that we need to set constantly
self._empty_lines_in_values = False
super().__init__()
def _get_section_idx(self, name):
idx = [i for i, entry in enumerate(self._structure)
if isinstance(entry, Section) and entry.name == name]
if idx:
return idx[0]
else:
raise ValueError
def read(self, filename, encoding=None):
"""Read and parse a filename.
Args:
filename (str): path to file
encoding (str): encoding of file, default None
"""
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
self._filename = os.path.abspath(filename)
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The ``f`` argument must be iterable, returning one line at a time.
Optional second argument is the ``source`` specifying the name of the
file being read. If not given, it is taken from f.name. If ``f`` has no
``name`` attribute, ``<???>`` is used.
Args:
f: file like object
source (str): reference name for file object, default None
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string.
Args:
string (str): string containing a configuration
source (str): reference name for file object, default '<string>'
"""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def optionxform(self, optionstr):
"""Converts an option key to lower case for unification
Args:
optionstr (str): key name
Returns:
str: unified option name
"""
return optionstr.lower()
def _update_curr_block(self, block_type):
if not isinstance(self.last_item, block_type):
new_block = block_type(container=self)
self._structure.append(new_block)
def _add_comment(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_comment(line)
else:
self._update_curr_block(Comment)
self.last_item.add_line(line)
def _add_section(self, sectname, line):
new_section = Section(sectname, container=self)
new_section.add_line(line)
self._structure.append(new_section)
def _add_option(self, key, vi, value, line):
entry = Option(
key, value,
delimiter=vi,
container=self.last_item,
space_around_delimiters=self._space_around_delimiters,
line=line)
self.last_item.add_option(entry)
def _add_space(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_space(line)
else:
self._update_curr_block(Space)
self.last_item.add_line(line)
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]`), plus key/value options, indicated by
`name` and `value` delimited with a specific substring (`=` or `:` by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#` and `;` by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
Note: This method was borrowed from ConfigParser and we keep this
mess here as close as possible to the original messod (pardon
this german pun) for consistency reasons and later upgrades.
"""
self._structure = []
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
self._add_comment(line) # HOOK
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
self.last_item.last_item.add_line(line) # HOOK
else:
# empty line marks end of value
indent_level = sys.maxsize
if comment_start is None:
self._add_space(line)
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
self.last_item.last_item.add_line(line) # HOOK
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
else:
cursect = self._dict()
self._sections[sectname] = cursect
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
self._add_section(sectname, line) # HOOK
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
self._add_option(optname, vi, optval, line) # HOOK
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def write(self, fp):
"""Write an .ini-format representation of the configuration state.
Args:
fp (file-like object): open file handle
"""
fp.write(str(self))
def update_file(self):
"""Update the read-in configuration file.
"""
if self._filename is None:
raise NoConfigFileReadError()
with open(self._filename, 'w') as fb:
self.write(fb)
def validate_format(self, **kwargs):
"""Call ConfigParser to validate config
Args:
kwargs: are passed to :class:`configparser.ConfigParser`
"""
args = dict(
dict_type=self._dict,
allow_no_value=self._allow_no_value,
inline_comment_prefixes=self._inline_comment_prefixes,
strict=self._strict,
empty_lines_in_values=self._empty_lines_in_values
)
args.update(kwargs)
parser = ConfigParser(**args)
updated_cfg = str(self)
parser.read_string(updated_cfg)
def sections_blocks(self):
"""Returns all section blocks
Returns:
list: list of :class:`Section` blocks
"""
return [block for block in self._structure
if isinstance(block, Section)]
def sections(self):
"""Return a list of section names
Returns:
list: list of section names
"""
return [section.name for section in self.sections_blocks()]
def __str__(self):
return ''.join(str(block) for block in self._structure)
def __getitem__(self, key):
for section in self.sections_blocks():
if section.name == key:
return section
else:
raise KeyError(key)
def __setitem__(self, key, value):
if not isinstance(value, Section):
raise ValueError("Value must be of type Section!")
if isinstance(key, str) and key in self:
idx = self._get_section_idx(key)
del self._structure[idx]
self._structure.insert(idx, value)
else:
# name the section by the key
value.name = key
self.add_section(value)
def __delitem__(self, section):
if not self.has_section(section):
raise KeyError(section)
self.remove_section(section)
def __contains__(self, key):
return self.has_section(key)
def __len__(self):
"""Number of all blocks, not just sections"""
return len(self._structure)
def __iter__(self):
"""Iterate over all blocks, not just sections"""
return self._structure.__iter__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._structure == other._structure
else:
return False
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
Args:
section (str or :class:`Section`): name or Section type
"""
if section in self.sections():
raise DuplicateSectionError(section)
if isinstance(section, str):
# create a new section
section = Section(section, container=self)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
self._structure.append(section)
def has_section(self, section):
"""Returns whether the given section exists.
Args:
section (str): name of section
Returns:
bool: wether the section exists
"""
return section in self.sections()
def options(self, section):
"""Returns list of configuration options for the named section.
Args:
section (str): name of section
Returns:
list: list of option names
"""
if not self.has_section(section):
raise NoSectionError(section) from None
return self.__getitem__(section).options()
def get(self, section, option):
"""Gets an option value for a given section.
Args:
section (str): section name
option (str): option name
Returns:
:class:`Option`: Option object holding key/value pair
"""
if not self.has_section(section):
raise NoSectionError(section) from None
section = self.__getitem__(section)
option = self.optionxform(option)
try:
value = section[option]
except KeyError:
raise NoOptionError(option, section)
return value
def items(self, section=_UNSET):
"""Return a list of (name, value) tuples for options or sections.
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_type) for each section.
Args:
section (str): optional section name, default UNSET
Returns:
list: list of :class:`Section` or :class:`Option` objects
"""
if section is _UNSET:
return [(sect.name, sect) for sect in self.sections_blocks()]
section = self.__getitem__(section)
return [(opt.key, opt) for opt in section.option_blocks()]
def has_option(self, section, option):
"""Checks for the existence of a given option in a given section.
Args:
section (str): name of section
option (str): name of option
Returns:
bool: whether the option exists in the given section
"""
if section not in self.sections():
return False
else:
option = self.optionxform(option)
return option in self[section]
def set(self, section, option, value=None):
    """Set an option.

    Creates the option if it is missing; otherwise only its value is
    updated so surrounding formatting is preserved.

    Args:
        section (str): section name
        option (str): option name
        value (str): value, default None

    Returns:
        self, for call chaining

    Raises:
        NoSectionError: if the section does not exist
    """
    try:
        sect = self.__getitem__(section)
    except KeyError:
        raise NoSectionError(section) from None
    key = self.optionxform(option)
    if key in sect:
        sect[key].value = value
    else:
        sect[key] = value
    return self
def remove_section(self, name):
"""Remove a file section.
Args:
name: name of the section
Returns:
bool: whether the section was actually removed
"""
existed = self.has_section(name)
if existed:
idx = self._get_section_idx(name)
del self._structure[idx]
return existed
def to_dict(self):
"""Transform to dictionary
Returns:
dict: dictionary with same content
"""
return {sect: self.__getitem__(sect).to_dict()
for sect in self.sections()}
|
pyscaffold/configupdater | src/configupdater/configupdater.py | ConfigUpdater.remove_section | python | def remove_section(self, name):
existed = self.has_section(name)
if existed:
idx = self._get_section_idx(name)
del self._structure[idx]
return existed | Remove a file section.
Args:
name: name of the section
Returns:
bool: whether the section was actually removed | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L1050-L1063 | [
"def _get_section_idx(self, name):\n idx = [i for i, entry in enumerate(self._structure)\n if isinstance(entry, Section) and entry.name == name]\n if idx:\n return idx[0]\n else:\n raise ValueError\n",
"def has_section(self, section):\n \"\"\"Returns whether the given section e... | class ConfigUpdater(Container, MutableMapping):
"""Parser for updating configuration files.
ConfigUpdater follows the API of ConfigParser with some differences:
* inline comments are treated as part of a key's value,
* only a single config file can be updated at a time,
* empty lines in values are not valid,
* the original case of sections and keys are kept,
* control over the position of a new section/key.
Following features are **deliberately not** implemented:
* interpolation of values,
* propagation of parameters from the default section,
* conversions of values,
* passing key/value-pairs with ``default`` argument,
* non-strict mode allowing duplicate sections and keys.
"""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
def __init__(self, allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, space_around_delimiters=True):
"""Constructor of ConfigUpdater
Args:
allow_no_value (bool): allow keys without a value, default False
delimiters (tuple): delimiters for key/value pairs, default =, :
comment_prefixes (tuple): prefix of comments, default # and ;
inline_comment_prefixes (tuple): prefix of inline comment,
default None
strict (bool): each section must be unique as well as every key
within a section, default True
space_around_delimiters (bool): add a space before and after the
delimiter, default True
"""
self._filename = None
self._space_around_delimiters = space_around_delimiters
self._dict = _default_dict # no reason to let the user change this
# keeping _sections to keep code aligned with ConfigParser but
# _structure takes the actual role instead. Only use self._structure!
self._sections = self._dict()
self._structure = []
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
# Options from ConfigParser that we need to set constantly
self._empty_lines_in_values = False
super().__init__()
def _get_section_idx(self, name):
    """Return the index of the first ``Section`` block named *name*.

    Args:
        name (str): section name to look up in ``self._structure``

    Returns:
        int: index of the matching :class:`Section` in ``_structure``

    Raises:
        ValueError: if no section with the given name exists
    """
    # Scan lazily and return at the first hit instead of materializing
    # the full list of matching indices just to take its head.
    for i, entry in enumerate(self._structure):
        if isinstance(entry, Section) and entry.name == name:
            return i
    # A bare ValueError gave callers no clue which lookup failed.
    raise ValueError("no section named {!r}".format(name))
def read(self, filename, encoding=None):
"""Read and parse a filename.
Args:
filename (str): path to file
encoding (str): encoding of file, default None
"""
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
self._filename = os.path.abspath(filename)
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The ``f`` argument must be iterable, returning one line at a time.
Optional second argument is the ``source`` specifying the name of the
file being read. If not given, it is taken from f.name. If ``f`` has no
``name`` attribute, ``<???>`` is used.
Args:
f: file like object
source (str): reference name for file object, default None
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string.
Args:
string (str): string containing a configuration
source (str): reference name for file object, default '<string>'
"""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def optionxform(self, optionstr):
    """Converts an option key to lower case for unification

    Args:
        optionstr (str): key name

    Returns:
        str: unified option name
    """
    # Mirror ConfigParser's default normalization: plain lower-casing.
    normalized = optionstr.lower()
    return normalized
def _update_curr_block(self, block_type):
if not isinstance(self.last_item, block_type):
new_block = block_type(container=self)
self._structure.append(new_block)
def _add_comment(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_comment(line)
else:
self._update_curr_block(Comment)
self.last_item.add_line(line)
def _add_section(self, sectname, line):
new_section = Section(sectname, container=self)
new_section.add_line(line)
self._structure.append(new_section)
def _add_option(self, key, vi, value, line):
entry = Option(
key, value,
delimiter=vi,
container=self.last_item,
space_around_delimiters=self._space_around_delimiters,
line=line)
self.last_item.add_option(entry)
def _add_space(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_space(line)
else:
self._update_curr_block(Space)
self.last_item.add_line(line)
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]`), plus key/value options, indicated by
`name` and `value` delimited with a specific substring (`=` or `:` by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#` and `;` by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
Note: This method was borrowed from ConfigParser and we keep this
mess here as close as possible to the original messod (pardon
this german pun) for consistency reasons and later upgrades.
"""
self._structure = []
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
self._add_comment(line) # HOOK
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
self.last_item.last_item.add_line(line) # HOOK
else:
# empty line marks end of value
indent_level = sys.maxsize
if comment_start is None:
self._add_space(line)
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
self.last_item.last_item.add_line(line) # HOOK
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
else:
cursect = self._dict()
self._sections[sectname] = cursect
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
self._add_section(sectname, line) # HOOK
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
self._add_option(optname, vi, optval, line) # HOOK
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def write(self, fp):
"""Write an .ini-format representation of the configuration state.
Args:
fp (file-like object): open file handle
"""
fp.write(str(self))
def update_file(self):
"""Update the read-in configuration file.
"""
if self._filename is None:
raise NoConfigFileReadError()
with open(self._filename, 'w') as fb:
self.write(fb)
def validate_format(self, **kwargs):
"""Call ConfigParser to validate config
Args:
kwargs: are passed to :class:`configparser.ConfigParser`
"""
args = dict(
dict_type=self._dict,
allow_no_value=self._allow_no_value,
inline_comment_prefixes=self._inline_comment_prefixes,
strict=self._strict,
empty_lines_in_values=self._empty_lines_in_values
)
args.update(kwargs)
parser = ConfigParser(**args)
updated_cfg = str(self)
parser.read_string(updated_cfg)
def sections_blocks(self):
"""Returns all section blocks
Returns:
list: list of :class:`Section` blocks
"""
return [block for block in self._structure
if isinstance(block, Section)]
def sections(self):
"""Return a list of section names
Returns:
list: list of section names
"""
return [section.name for section in self.sections_blocks()]
def __str__(self):
return ''.join(str(block) for block in self._structure)
def __getitem__(self, key):
for section in self.sections_blocks():
if section.name == key:
return section
else:
raise KeyError(key)
def __setitem__(self, key, value):
if not isinstance(value, Section):
raise ValueError("Value must be of type Section!")
if isinstance(key, str) and key in self:
idx = self._get_section_idx(key)
del self._structure[idx]
self._structure.insert(idx, value)
else:
# name the section by the key
value.name = key
self.add_section(value)
def __delitem__(self, section):
if not self.has_section(section):
raise KeyError(section)
self.remove_section(section)
def __contains__(self, key):
return self.has_section(key)
def __len__(self):
"""Number of all blocks, not just sections"""
return len(self._structure)
def __iter__(self):
"""Iterate over all blocks, not just sections"""
return self._structure.__iter__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._structure == other._structure
else:
return False
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
Args:
section (str or :class:`Section`): name or Section type
"""
if section in self.sections():
raise DuplicateSectionError(section)
if isinstance(section, str):
# create a new section
section = Section(section, container=self)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
self._structure.append(section)
def has_section(self, section):
"""Returns whether the given section exists.
Args:
section (str): name of section
Returns:
bool: wether the section exists
"""
return section in self.sections()
def options(self, section):
"""Returns list of configuration options for the named section.
Args:
section (str): name of section
Returns:
list: list of option names
"""
if not self.has_section(section):
raise NoSectionError(section) from None
return self.__getitem__(section).options()
def get(self, section, option):
    """Gets an option value for a given section.

    Args:
        section (str): section name
        option (str): option name

    Returns:
        :class:`Option`: Option object holding key/value pair

    Raises:
        NoSectionError: if the named section does not exist
        NoOptionError: if the option is missing from the section
    """
    if not self.has_section(section):
        raise NoSectionError(section) from None
    # Keep the plain name around: NoOptionError expects the section
    # *name*, not the Section block object that `section` is rebound to.
    section_name = section
    section = self.__getitem__(section)
    option = self.optionxform(option)
    try:
        value = section[option]
    except KeyError:
        raise NoOptionError(option, section_name)
    return value
def items(self, section=_UNSET):
"""Return a list of (name, value) tuples for options or sections.
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_type) for each section.
Args:
section (str): optional section name, default UNSET
Returns:
list: list of :class:`Section` or :class:`Option` objects
"""
if section is _UNSET:
return [(sect.name, sect) for sect in self.sections_blocks()]
section = self.__getitem__(section)
return [(opt.key, opt) for opt in section.option_blocks()]
def has_option(self, section, option):
    """Checks for the existence of a given option in a given section.

    Args:
        section (str): name of section
        option (str): name of option

    Returns:
        bool: whether the option exists in the given section
    """
    # An unknown section cannot contain the option.
    if section in self.sections():
        key = self.optionxform(option)
        return key in self[section]
    return False
def set(self, section, option, value=None):
"""Set an option.
Args:
section (str): section name
option (str): option name
value (str): value, default None
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
if option in section:
section[option].value = value
else:
section[option] = value
return self
def remove_option(self, section, option):
"""Remove an option.
Args:
section (str): section name
option (str): option name
Returns:
bool: whether the option was actually removed
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
existed = option in section.options()
if existed:
del section[option]
return existed
def to_dict(self):
    """Transform to dictionary

    Returns:
        dict: dictionary with same content
    """
    result = {}
    # One entry per section, each delegating to Section.to_dict().
    for name in self.sections():
        result[name] = self[name].to_dict()
    return result
|
pyscaffold/configupdater | src/configupdater/configupdater.py | ConfigUpdater.to_dict | python | def to_dict(self):
return {sect: self.__getitem__(sect).to_dict()
for sect in self.sections()} | Transform to dictionary
Returns:
dict: dictionary with same content | train | https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L1065-L1072 | [
"def sections(self):\n \"\"\"Return a list of section names\n\n Returns:\n list: list of section names\n \"\"\"\n return [section.name for section in self.sections_blocks()]\n"
] | class ConfigUpdater(Container, MutableMapping):
"""Parser for updating configuration files.
ConfigUpdater follows the API of ConfigParser with some differences:
* inline comments are treated as part of a key's value,
* only a single config file can be updated at a time,
* empty lines in values are not valid,
* the original case of sections and keys are kept,
* control over the position of a new section/key.
Following features are **deliberately not** implemented:
* interpolation of values,
* propagation of parameters from the default section,
* conversions of values,
* passing key/value-pairs with ``default`` argument,
* non-strict mode allowing duplicate sections and keys.
"""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
def __init__(self, allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, space_around_delimiters=True):
"""Constructor of ConfigUpdater
Args:
allow_no_value (bool): allow keys without a value, default False
delimiters (tuple): delimiters for key/value pairs, default =, :
comment_prefixes (tuple): prefix of comments, default # and ;
inline_comment_prefixes (tuple): prefix of inline comment,
default None
strict (bool): each section must be unique as well as every key
within a section, default True
space_around_delimiters (bool): add a space before and after the
delimiter, default True
"""
self._filename = None
self._space_around_delimiters = space_around_delimiters
self._dict = _default_dict # no reason to let the user change this
# keeping _sections to keep code aligned with ConfigParser but
# _structure takes the actual role instead. Only use self._structure!
self._sections = self._dict()
self._structure = []
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
# Options from ConfigParser that we need to set constantly
self._empty_lines_in_values = False
super().__init__()
def _get_section_idx(self, name):
idx = [i for i, entry in enumerate(self._structure)
if isinstance(entry, Section) and entry.name == name]
if idx:
return idx[0]
else:
raise ValueError
def read(self, filename, encoding=None):
"""Read and parse a filename.
Args:
filename (str): path to file
encoding (str): encoding of file, default None
"""
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
self._filename = os.path.abspath(filename)
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The ``f`` argument must be iterable, returning one line at a time.
Optional second argument is the ``source`` specifying the name of the
file being read. If not given, it is taken from f.name. If ``f`` has no
``name`` attribute, ``<???>`` is used.
Args:
f: file like object
source (str): reference name for file object, default None
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string.
Args:
string (str): string containing a configuration
source (str): reference name for file object, default '<string>'
"""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def optionxform(self, optionstr):
"""Converts an option key to lower case for unification
Args:
optionstr (str): key name
Returns:
str: unified option name
"""
return optionstr.lower()
def _update_curr_block(self, block_type):
if not isinstance(self.last_item, block_type):
new_block = block_type(container=self)
self._structure.append(new_block)
def _add_comment(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_comment(line)
else:
self._update_curr_block(Comment)
self.last_item.add_line(line)
def _add_section(self, sectname, line):
new_section = Section(sectname, container=self)
new_section.add_line(line)
self._structure.append(new_section)
def _add_option(self, key, vi, value, line):
entry = Option(
key, value,
delimiter=vi,
container=self.last_item,
space_around_delimiters=self._space_around_delimiters,
line=line)
self.last_item.add_option(entry)
def _add_space(self, line):
if isinstance(self.last_item, Section):
self.last_item.add_space(line)
else:
self._update_curr_block(Space)
self.last_item.add_line(line)
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]`), plus key/value options, indicated by
`name` and `value` delimited with a specific substring (`=` or `:` by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#` and `;` by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
Note: This method was borrowed from ConfigParser and we keep this
mess here as close as possible to the original messod (pardon
this german pun) for consistency reasons and later upgrades.
"""
self._structure = []
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
self._add_comment(line) # HOOK
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
self.last_item.last_item.add_line(line) # HOOK
else:
# empty line marks end of value
indent_level = sys.maxsize
if comment_start is None:
self._add_space(line)
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
self.last_item.last_item.add_line(line) # HOOK
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
else:
cursect = self._dict()
self._sections[sectname] = cursect
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
self._add_section(sectname, line) # HOOK
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
self._add_option(optname, vi, optval, line) # HOOK
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def write(self, fp):
"""Write an .ini-format representation of the configuration state.
Args:
fp (file-like object): open file handle
"""
fp.write(str(self))
def update_file(self):
"""Update the read-in configuration file.
"""
if self._filename is None:
raise NoConfigFileReadError()
with open(self._filename, 'w') as fb:
self.write(fb)
def validate_format(self, **kwargs):
"""Call ConfigParser to validate config
Args:
kwargs: are passed to :class:`configparser.ConfigParser`
"""
args = dict(
dict_type=self._dict,
allow_no_value=self._allow_no_value,
inline_comment_prefixes=self._inline_comment_prefixes,
strict=self._strict,
empty_lines_in_values=self._empty_lines_in_values
)
args.update(kwargs)
parser = ConfigParser(**args)
updated_cfg = str(self)
parser.read_string(updated_cfg)
def sections_blocks(self):
"""Returns all section blocks
Returns:
list: list of :class:`Section` blocks
"""
return [block for block in self._structure
if isinstance(block, Section)]
def sections(self):
"""Return a list of section names
Returns:
list: list of section names
"""
return [section.name for section in self.sections_blocks()]
def __str__(self):
return ''.join(str(block) for block in self._structure)
def __getitem__(self, key):
for section in self.sections_blocks():
if section.name == key:
return section
else:
raise KeyError(key)
def __setitem__(self, key, value):
if not isinstance(value, Section):
raise ValueError("Value must be of type Section!")
if isinstance(key, str) and key in self:
idx = self._get_section_idx(key)
del self._structure[idx]
self._structure.insert(idx, value)
else:
# name the section by the key
value.name = key
self.add_section(value)
def __delitem__(self, section):
if not self.has_section(section):
raise KeyError(section)
self.remove_section(section)
def __contains__(self, key):
return self.has_section(key)
def __len__(self):
"""Number of all blocks, not just sections"""
return len(self._structure)
def __iter__(self):
"""Iterate over all blocks, not just sections"""
return self._structure.__iter__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._structure == other._structure
else:
return False
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
Args:
section (str or :class:`Section`): name or Section type
"""
if section in self.sections():
raise DuplicateSectionError(section)
if isinstance(section, str):
# create a new section
section = Section(section, container=self)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
self._structure.append(section)
def has_section(self, section):
"""Returns whether the given section exists.
Args:
section (str): name of section
Returns:
bool: wether the section exists
"""
return section in self.sections()
def options(self, section):
"""Returns list of configuration options for the named section.
Args:
section (str): name of section
Returns:
list: list of option names
"""
if not self.has_section(section):
raise NoSectionError(section) from None
return self.__getitem__(section).options()
def get(self, section, option):
    """Gets an option value for a given section.

    Args:
        section (str): section name
        option (str): option name

    Returns:
        :class:`Option`: Option object holding key/value pair

    Raises:
        NoSectionError: if the named section does not exist
        NoOptionError: if the option is missing from the section
    """
    if not self.has_section(section):
        raise NoSectionError(section) from None
    # Keep the plain name around: NoOptionError expects the section
    # *name*, not the Section block object that `section` is rebound to.
    section_name = section
    section = self.__getitem__(section)
    option = self.optionxform(option)
    try:
        value = section[option]
    except KeyError:
        raise NoOptionError(option, section_name)
    return value
def items(self, section=_UNSET):
"""Return a list of (name, value) tuples for options or sections.
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_type) for each section.
Args:
section (str): optional section name, default UNSET
Returns:
list: list of :class:`Section` or :class:`Option` objects
"""
if section is _UNSET:
return [(sect.name, sect) for sect in self.sections_blocks()]
section = self.__getitem__(section)
return [(opt.key, opt) for opt in section.option_blocks()]
def has_option(self, section, option):
"""Checks for the existence of a given option in a given section.
Args:
section (str): name of section
option (str): name of option
Returns:
bool: whether the option exists in the given section
"""
if section not in self.sections():
return False
else:
option = self.optionxform(option)
return option in self[section]
def set(self, section, option, value=None):
"""Set an option.
Args:
section (str): section name
option (str): option name
value (str): value, default None
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
if option in section:
section[option].value = value
else:
section[option] = value
return self
def remove_option(self, section, option):
"""Remove an option.
Args:
section (str): section name
option (str): option name
Returns:
bool: whether the option was actually removed
"""
try:
section = self.__getitem__(section)
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
existed = option in section.options()
if existed:
del section[option]
return existed
def remove_section(self, name):
"""Remove a file section.
Args:
name: name of the section
Returns:
bool: whether the section was actually removed
"""
existed = self.has_section(name)
if existed:
idx = self._get_section_idx(name)
del self._structure[idx]
return existed
|
commonsense/metanl | metanl/extprocess.py | unicode_is_punctuation | python | def unicode_is_punctuation(text):
for char in str_func(text):
category = unicodedata.category(char)[0]
if category not in 'PSZMC':
return False
return True | Test if a token is made entirely of Unicode characters of the following
classes:
- P: punctuation
- S: symbols
- Z: separators
- M: combining marks
- C: control characters
>>> unicode_is_punctuation('word')
False
>>> unicode_is_punctuation('。')
True
>>> unicode_is_punctuation('-')
True
>>> unicode_is_punctuation('-3')
False
>>> unicode_is_punctuation('あ')
False | train | https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/extprocess.py#L246-L272 | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Tools for using an external program as an NLP pipe. See, for example,
freeling.py.
"""
import subprocess
import unicodedata
import sys
from ftfy.fixes import remove_control_chars, remove_unsafe_private_use
if sys.version_info.major == 2:
range = xrange
str_func = unicode
else:
str_func = str
def render_safe(text):
    '''
    Make sure the given text is safe to pass to an external process.

    Strips control characters and unsafe private-use codepoints (both
    via ftfy's fixers) so the text cannot corrupt the subprocess pipe.
    '''
    return remove_control_chars(remove_unsafe_private_use(text))
class ProcessError(IOError):
    """Raised when the external NLP process cannot be started or used.

    Subclasses IOError so existing handlers that catch IOError also
    catch failures coming from the external-process pipe.
    """
class ProcessWrapper(object):
"""
A ProcessWrapper uses the `subprocess` module to keep a process open that
we can pipe stuff through to get NLP results.
Instead of every instance immediately opening a process, however, it waits
until the first time it is needed, then starts the process.
Many methods are intended to be implemented by subclasses of ProcessWrapper
that actually know what program they're talking to.
"""
def __del__(self):
    """
    Clean up by closing the pipe.

    Only acts if the process was ever started: ``_process`` is created
    lazily by the ``process`` property, so a wrapper that was never
    used has nothing to close.
    """
    # NOTE(review): this closes stdin but never wait()s on the child;
    # presumably the child exits on EOF — confirm, else zombies linger.
    if hasattr(self, '_process'):
        self._process.stdin.close()
@property
def process(self):
"""
Store the actual process in _process. If it doesn't exist yet, create
it.
"""
if hasattr(self, '_process'):
return self._process
else:
self._process = self._get_process()
return self._process
def _get_command(self):
"""
This method should return the command to run, as a list
of arguments that can be used by subprocess.Popen.
"""
raise NotImplementedError
def _get_process(self):
"""
Create the process by running the specified command.
"""
command = self._get_command()
return subprocess.Popen(command, bufsize=-1, close_fds=True,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
def get_record_root(self, record):
"""
Given a *record* (the data that the external process returns for a
given single token), this specifies how to extract its root word
(aka its lemma).
"""
raise NotImplementedError
def get_record_token(self, record):
"""
Given a record, this specifies how to extract the exact word or token
that was processed.
"""
raise NotImplementedError
def analyze(self, text):
"""
Take text as input, run it through the external process, and return a
list of *records* containing the results.
"""
raise NotImplementedError
def send_input(self, data):
self.process.stdin.write(data)
self.process.stdin.flush()
def receive_output_line(self):
line = self.process.stdout.readline()
if not line:
raise ProcessError("reached end of output")
return line
def restart_process(self):
if hasattr(self, '_process'):
self._process.stdin.close()
self._process = self._get_process()
return self._process
def tokenize_list(self, text):
"""
Split a text into separate words.
"""
return [self.get_record_token(record) for record in self.analyze(text)]
def tokenize(self, text):
"""
Yell at people who are still using simplenlp's bad idea of
tokenization.
"""
raise NotImplementedError("tokenize is deprecated. Use tokenize_list.")
def is_stopword_record(self, record, common_words=False):
"""
Given a record, return whether it represents a stopword (a word that
should be discarded in NLP results).
Note that we want very few words to be stopwords. Words that are
meaningful but simply common can be recognized by their very high word
frequency, and handled appropriately. Often, we only want determiners
(such as 'a', 'an', and 'the' in English) to be stopwords.
Takes in a vestigial parameter, `common_words`, and ignores it.
"""
raise NotImplementedError
def is_stopword(self, text):
"""
Determine whether a single word is a stopword, or whether a short
phrase is made entirely of stopwords, disregarding context.
Use of this function should be avoided; it's better to give the text
in context and let the process determine which words are the stopwords.
"""
found_content_word = False
for record in self.analyze(text):
if not self.is_stopword_record(record):
found_content_word = True
break
return not found_content_word
def get_record_pos(self, record):
"""
Given a record, get the word's part of speech.
This default implementation simply distinguishes stopwords from
non-stopwords.
"""
if self.is_stopword_record(record):
return 'STOP'
else:
return 'TERM'
def normalize_list(self, text, cache=None):
"""
Get a canonical list representation of text, with words
separated and reduced to their base forms.
TODO: use the cache.
"""
words = []
analysis = self.analyze(text)
for record in analysis:
if not self.is_stopword_record(record):
words.append(self.get_record_root(record))
if not words:
# Don't discard stopwords if that's all you've got
words = [self.get_record_token(record) for record in analysis]
return words
def normalize(self, text, cache=None):
"""
Get a canonical string representation of this text, like
:meth:`normalize_list` but joined with spaces.
TODO: use the cache.
"""
return ' '.join(self.normalize_list(text, cache))
def tag_and_stem(self, text, cache=None):
"""
Given some text, return a sequence of (stem, pos, text) triples as
appropriate for the reader. `pos` can be as general or specific as
necessary (for example, it might label all parts of speech, or it might
only distinguish function words from others).
Twitter-style hashtags and at-mentions have the stem and pos they would
have without the leading # or @. For instance, if the reader's triple
for "thing" is ('thing', 'NN', 'things'), then "#things" would come out
as ('thing', 'NN', '#things').
"""
analysis = self.analyze(text)
triples = []
for record in analysis:
root = self.get_record_root(record)
token = self.get_record_token(record)
if token:
if unicode_is_punctuation(token):
triples.append((token, '.', token))
else:
pos = self.get_record_pos(record)
triples.append((root, pos, token))
return triples
def extract_phrases(self, text):
"""
Given some text, extract phrases of up to 2 content words,
and map their normalized form to the complete phrase.
"""
analysis = self.analyze(text)
for pos1 in range(len(analysis)):
rec1 = analysis[pos1]
if not self.is_stopword_record(rec1):
yield self.get_record_root(rec1), rec1[0]
for pos2 in range(pos1 + 1, len(analysis)):
rec2 = analysis[pos2]
if not self.is_stopword_record(rec2):
roots = [self.get_record_root(rec1),
self.get_record_root(rec2)]
pieces = [analysis[i][0] for i in range(pos1, pos2+1)]
term = ' '.join(roots)
phrase = ''.join(pieces)
yield term, phrase
break
|
commonsense/metanl | metanl/extprocess.py | ProcessWrapper.process | python | def process(self):
if hasattr(self, '_process'):
return self._process
else:
self._process = self._get_process()
return self._process | Store the actual process in _process. If it doesn't exist yet, create
it. | train | https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/extprocess.py#L52-L61 | [
"def _get_process(self):\n \"\"\"\n Create the process by running the specified command.\n \"\"\"\n command = self._get_command()\n return subprocess.Popen(command, bufsize=-1, close_fds=True,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE)\n"
... | class ProcessWrapper(object):
"""
A ProcessWrapper uses the `subprocess` module to keep a process open that
we can pipe stuff through to get NLP results.
Instead of every instance immediately opening a process, however, it waits
until the first time it is needed, then starts the process.
Many methods are intended to be implemented by subclasses of ProcessWrapper
that actually know what program they're talking to.
"""
def __del__(self):
"""
Clean up by closing the pipe.
"""
if hasattr(self, '_process'):
self._process.stdin.close()
@property
def _get_command(self):
"""
This method should return the command to run, as a list
of arguments that can be used by subprocess.Popen.
"""
raise NotImplementedError
def _get_process(self):
"""
Create the process by running the specified command.
"""
command = self._get_command()
return subprocess.Popen(command, bufsize=-1, close_fds=True,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
def get_record_root(self, record):
"""
Given a *record* (the data that the external process returns for a
given single token), this specifies how to extract its root word
(aka its lemma).
"""
raise NotImplementedError
def get_record_token(self, record):
"""
Given a record, this specifies how to extract the exact word or token
that was processed.
"""
raise NotImplementedError
def analyze(self, text):
"""
Take text as input, run it through the external process, and return a
list of *records* containing the results.
"""
raise NotImplementedError
def send_input(self, data):
self.process.stdin.write(data)
self.process.stdin.flush()
def receive_output_line(self):
line = self.process.stdout.readline()
if not line:
raise ProcessError("reached end of output")
return line
def restart_process(self):
if hasattr(self, '_process'):
self._process.stdin.close()
self._process = self._get_process()
return self._process
def tokenize_list(self, text):
"""
Split a text into separate words.
"""
return [self.get_record_token(record) for record in self.analyze(text)]
def tokenize(self, text):
"""
Yell at people who are still using simplenlp's bad idea of
tokenization.
"""
raise NotImplementedError("tokenize is deprecated. Use tokenize_list.")
def is_stopword_record(self, record, common_words=False):
"""
Given a record, return whether it represents a stopword (a word that
should be discarded in NLP results).
Note that we want very few words to be stopwords. Words that are
meaningful but simply common can be recognized by their very high word
frequency, and handled appropriately. Often, we only want determiners
(such as 'a', 'an', and 'the' in English) to be stopwords.
Takes in a vestigial parameter, `common_words`, and ignores it.
"""
raise NotImplementedError
def is_stopword(self, text):
"""
Determine whether a single word is a stopword, or whether a short
phrase is made entirely of stopwords, disregarding context.
Use of this function should be avoided; it's better to give the text
in context and let the process determine which words are the stopwords.
"""
found_content_word = False
for record in self.analyze(text):
if not self.is_stopword_record(record):
found_content_word = True
break
return not found_content_word
def get_record_pos(self, record):
"""
Given a record, get the word's part of speech.
This default implementation simply distinguishes stopwords from
non-stopwords.
"""
if self.is_stopword_record(record):
return 'STOP'
else:
return 'TERM'
def normalize_list(self, text, cache=None):
"""
Get a canonical list representation of text, with words
separated and reduced to their base forms.
TODO: use the cache.
"""
words = []
analysis = self.analyze(text)
for record in analysis:
if not self.is_stopword_record(record):
words.append(self.get_record_root(record))
if not words:
# Don't discard stopwords if that's all you've got
words = [self.get_record_token(record) for record in analysis]
return words
def normalize(self, text, cache=None):
"""
Get a canonical string representation of this text, like
:meth:`normalize_list` but joined with spaces.
TODO: use the cache.
"""
return ' '.join(self.normalize_list(text, cache))
def tag_and_stem(self, text, cache=None):
"""
Given some text, return a sequence of (stem, pos, text) triples as
appropriate for the reader. `pos` can be as general or specific as
necessary (for example, it might label all parts of speech, or it might
only distinguish function words from others).
Twitter-style hashtags and at-mentions have the stem and pos they would
have without the leading # or @. For instance, if the reader's triple
for "thing" is ('thing', 'NN', 'things'), then "#things" would come out
as ('thing', 'NN', '#things').
"""
analysis = self.analyze(text)
triples = []
for record in analysis:
root = self.get_record_root(record)
token = self.get_record_token(record)
if token:
if unicode_is_punctuation(token):
triples.append((token, '.', token))
else:
pos = self.get_record_pos(record)
triples.append((root, pos, token))
return triples
def extract_phrases(self, text):
"""
Given some text, extract phrases of up to 2 content words,
and map their normalized form to the complete phrase.
"""
analysis = self.analyze(text)
for pos1 in range(len(analysis)):
rec1 = analysis[pos1]
if not self.is_stopword_record(rec1):
yield self.get_record_root(rec1), rec1[0]
for pos2 in range(pos1 + 1, len(analysis)):
rec2 = analysis[pos2]
if not self.is_stopword_record(rec2):
roots = [self.get_record_root(rec1),
self.get_record_root(rec2)]
pieces = [analysis[i][0] for i in range(pos1, pos2+1)]
term = ' '.join(roots)
phrase = ''.join(pieces)
yield term, phrase
break
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.